go get honnef.co/go/tools/cmd/staticcheck in a temp dir

Signed-off-by: Simon Pasquier <spasquie@redhat.com>
Simon Pasquier 2018-10-26 13:49:06 +02:00
parent f6f2778528
commit 790876da60
135 changed files with 13 additions and 37439 deletions


@@ -37,7 +37,6 @@ ifneq (,$(wildcard go.mod))
 GO111MODULE := on
 unexport GOVENDOR
-unexport STATICCHECK
 else
 $(warning This repository requires Go >= 1.11 because of Go modules)
 $(warning Some recipes may not work as expected as the current Go runtime is '$(shell $(GO) version)')
@@ -45,11 +44,11 @@ $(warning Some recipes may not work as expected as the current Go runtime is '$(shell $(GO) version)')
 else
 # This repository isn't using Go modules (yet).
 GOVENDOR := $(FIRST_GOPATH)/bin/govendor
-STATICCHECK := $(FIRST_GOPATH)/bin/staticcheck
 unexport GO111MODULE
 endif
 PROMU := $(FIRST_GOPATH)/bin/promu
+STATICCHECK := $(FIRST_GOPATH)/bin/staticcheck
 pkgs = ./...
 PREFIX ?= $(shell pwd)
@@ -112,7 +111,7 @@ common-staticcheck: $(STATICCHECK)
 ifeq (,$(wildcard go.mod))
 	$(STATICCHECK) -ignore "$(STATICCHECK_IGNORE)" $(pkgs)
 else
-	GO111MODULE=$(GO111MODULE) $(GO) run $(GOOPTS) honnef.co/go/tools/cmd/staticcheck -ignore "$(STATICCHECK_IGNORE)" -checks "SA*" $(pkgs)
+	$(STATICCHECK) -ignore "$(STATICCHECK_IGNORE)" -checks "SA*" $(pkgs)
 endif

 .PHONY: common-unused
@@ -155,9 +154,19 @@ common-docker-tag-latest:
 promu:
 	GOOS= GOARCH= GO111MODULE=off $(GO) get -u github.com/prometheus/promu
-ifeq (,$(wildcard go.mod))
 .PHONY: $(STATICCHECK)
 $(STATICCHECK):
+ifneq (,$(wildcard go.mod))
+# Get staticcheck from a temporary directory to avoid modifying the local go.{mod,sum}.
+# See https://github.com/golang/go/issues/27643.
+# For now, we are using the next branch of staticcheck because master isn't compatible yet with Go modules.
+	tmpModule=$$(mktemp -d 2>&1) && \
+	mkdir -p $${tmpModule}/staticcheck && \
+	cd "$${tmpModule}"/staticcheck && \
+	GO111MODULE=on $(GO) mod init example.com/staticcheck && \
+	GO111MODULE=on GOOS= GOARCH= $(GO) get -u honnef.co/go/tools/cmd/staticcheck@next && \
+	rm -rf $${tmpModule};
+else
 	GOOS= GOARCH= $(GO) get -u honnef.co/go/tools/cmd/staticcheck
+endif

 .PHONY: $(GOVENDOR)
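
The recipe above works around golang/go#27643 by running `go get` from inside a throwaway module, so the tool's requirements never touch the repository's own go.mod and go.sum. A minimal sketch of the same idea in Go (not part of the commit), assuming Go 1.11-era tooling on `PATH`; the module path `example.com/tmp` and the temp-dir prefix are arbitrary placeholders:

```go
package main

import (
	"io/ioutil"
	"log"
	"os"
	"os/exec"
)

// run executes name with args inside dir, with module mode forced on.
func run(dir, name string, args ...string) {
	cmd := exec.Command(name, args...)
	cmd.Dir = dir
	cmd.Env = append(os.Environ(), "GO111MODULE=on")
	cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr
	if err := cmd.Run(); err != nil {
		log.Fatalf("%s %v: %v", name, args, err)
	}
}

func main() {
	// A scratch module keeps `go get` from rewriting the real go.{mod,sum};
	// the built binary still lands in $GOPATH/bin (or $GOBIN).
	tmp, err := ioutil.TempDir("", "staticcheck")
	if err != nil {
		log.Fatal(err)
	}
	defer os.RemoveAll(tmp)

	run(tmp, "go", "mod", "init", "example.com/tmp") // placeholder module path
	run(tmp, "go", "get", "-u", "honnef.co/go/tools/cmd/staticcheck@next")
}
```

With Go 1.16 and later this dance is unnecessary: `go install honnef.co/go/tools/cmd/staticcheck@latest` installs a tool without touching the enclosing module.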

go.mod (4 changes)

@@ -3,7 +3,6 @@ module github.com/prometheus/prometheus
 require (
 	github.com/Azure/azure-sdk-for-go v0.0.0-20161028183111-bd73d950fa44
 	github.com/Azure/go-autorest v10.8.1+incompatible
-	github.com/BurntSushi/toml v0.3.1 // indirect
 	github.com/OneOfOne/xxhash v1.2.2 // indirect
 	github.com/StackExchange/wmi v0.0.0-20180725035823-b12b22c5341f // indirect
 	github.com/VividCortex/ewma v1.1.1 // indirect
@@ -143,9 +142,6 @@ require (
 	gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
 	gopkg.in/vmihailenco/msgpack.v2 v2.9.1 // indirect
 	gopkg.in/yaml.v2 v2.2.0
-	// honnef.co/go/tools is pinned to the next branch which supports Go modules.
-	// TODO: switch back to master once it is up-to-date.
-	honnef.co/go/tools v0.0.0-20180910201051-f1b53a58b022
 	k8s.io/api v0.0.0-20180628040859-072894a440bd
 	k8s.io/apimachinery v0.0.0-20180621070125-103fd098999d
 	k8s.io/client-go v8.0.0+incompatible

go.sum (4 changes)

@@ -2,8 +2,6 @@ github.com/Azure/azure-sdk-for-go v0.0.0-20161028183111-bd73d950fa44 h1:L4fLiifs
 github.com/Azure/azure-sdk-for-go v0.0.0-20161028183111-bd73d950fa44/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
 github.com/Azure/go-autorest v10.8.1+incompatible h1:u0jVQf+a6k6x8A+sT60l6EY9XZu+kHdnZVPAYqpVRo0=
 github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
-github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
-github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
 github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE=
 github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
 github.com/StackExchange/wmi v0.0.0-20180725035823-b12b22c5341f h1:5ZfJxyXo8KyX8DgGXC5B7ILL8y51fci/qYz2B4j8iLY=
@@ -293,8 +291,6 @@ gopkg.in/vmihailenco/msgpack.v2 v2.9.1 h1:kb0VV7NuIojvRfzwslQeP3yArBqJHW9tOl4t38
 gopkg.in/vmihailenco/msgpack.v2 v2.9.1/go.mod h1:/3Dn1Npt9+MYyLpYYXjInO/5jvMLamn+AEGwNEOatn8=
 gopkg.in/yaml.v2 v2.2.0 h1:ucE2Go3MGv/WipgucyA7X3+4pRLSbl5sd8WaEs60obQ=
 gopkg.in/yaml.v2 v2.2.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-honnef.co/go/tools v0.0.0-20180910201051-f1b53a58b022 h1:6BZ1uHan+VqtcF3xb+hOvKSWhvQy/nk+ZRtQJJLBWIM=
-honnef.co/go/tools v0.0.0-20180910201051-f1b53a58b022/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
 k8s.io/api v0.0.0-20180628040859-072894a440bd h1:HzgYeLDS1jLxw8DGr68KJh9cdQ5iZJizG0HZWstIhfQ=
 k8s.io/api v0.0.0-20180628040859-072894a440bd/go.mod h1:iuAfoD4hCxJ8Onx9kaTIt30j7jUFS00AXQi6QMi99vA=
 k8s.io/apimachinery v0.0.0-20180621070125-103fd098999d h1:MZjlsu9igBoVPZkXpIGoxI6EonqNsXXZU7hhvfQLkd4=


@@ -1,24 +0,0 @@
// Copyright 2018 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Imports of tools we want "go mod vendor" to include.
// https://github.com/golang/go/issues/25624#issuecomment-395556484
// https://github.com/golang/go/issues/25922
// +build tools

package tools
import (
_ "honnef.co/go/tools/cmd/staticcheck"
)


@@ -1,5 +0,0 @@
TAGS
tags
.*.swp
tomlcheck/tomlcheck
toml.test


@@ -1,15 +0,0 @@
language: go
go:
- 1.1
- 1.2
- 1.3
- 1.4
- 1.5
- 1.6
- tip
install:
- go install ./...
- go get github.com/BurntSushi/toml-test
script:
- export PATH="$PATH:$HOME/gopath/bin"
- make test


@@ -1,3 +0,0 @@
Compatible with TOML version
[v0.4.0](https://github.com/toml-lang/toml/blob/v0.4.0/versions/en/toml-v0.4.0.md)


@@ -1,21 +0,0 @@
The MIT License (MIT)
Copyright (c) 2013 TOML authors
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.


@@ -1,19 +0,0 @@
install:
go install ./...
test: install
go test -v
toml-test toml-test-decoder
toml-test -encoder toml-test-encoder
fmt:
gofmt -w *.go */*.go
colcheck *.go */*.go
tags:
find ./ -name '*.go' -print0 | xargs -0 gotags > TAGS
push:
git push origin master
git push github master


@@ -1,218 +0,0 @@
## TOML parser and encoder for Go with reflection
TOML stands for Tom's Obvious, Minimal Language. This Go package provides a
reflection interface similar to Go's standard library `json` and `xml`
packages. This package also supports the `encoding.TextUnmarshaler` and
`encoding.TextMarshaler` interfaces so that you can define custom data
representations. (There is an example of this below.)
Spec: https://github.com/toml-lang/toml
Compatible with TOML version
[v0.4.0](https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.4.0.md)
Documentation: https://godoc.org/github.com/BurntSushi/toml
Installation:
```bash
go get github.com/BurntSushi/toml
```
Try the toml validator:
```bash
go get github.com/BurntSushi/toml/cmd/tomlv
tomlv some-toml-file.toml
```
[![Build Status](https://travis-ci.org/BurntSushi/toml.svg?branch=master)](https://travis-ci.org/BurntSushi/toml) [![GoDoc](https://godoc.org/github.com/BurntSushi/toml?status.svg)](https://godoc.org/github.com/BurntSushi/toml)
### Testing
This package passes all tests in
[toml-test](https://github.com/BurntSushi/toml-test) for both the decoder
and the encoder.
### Examples
This package works similarly to how the Go standard library handles `XML`
and `JSON`. Namely, data is loaded into Go values via reflection.
For the simplest example, consider some TOML file as just a list of keys
and values:
```toml
Age = 25
Cats = [ "Cauchy", "Plato" ]
Pi = 3.14
Perfection = [ 6, 28, 496, 8128 ]
DOB = 1987-07-05T05:45:00Z
```
Which could be defined in Go as:
```go
type Config struct {
Age int
Cats []string
Pi float64
Perfection []int
DOB time.Time // requires `import time`
}
```
And then decoded with:
```go
var conf Config
if _, err := toml.Decode(tomlData, &conf); err != nil {
// handle error
}
```
You can also use struct tags if your struct field name doesn't map to a TOML
key value directly:
```toml
some_key_NAME = "wat"
```
```go
type TOML struct {
ObscureKey string `toml:"some_key_NAME"`
}
```
### Using the `encoding.TextUnmarshaler` interface
Here's an example that automatically parses duration strings into
`time.Duration` values:
```toml
[[song]]
name = "Thunder Road"
duration = "4m49s"
[[song]]
name = "Stairway to Heaven"
duration = "8m03s"
```
Which can be decoded with:
```go
type song struct {
Name string
Duration duration
}
type songs struct {
Song []song
}
var favorites songs
if _, err := toml.Decode(blob, &favorites); err != nil {
log.Fatal(err)
}
for _, s := range favorites.Song {
fmt.Printf("%s (%s)\n", s.Name, s.Duration)
}
```
And you'll also need a `duration` type that satisfies the
`encoding.TextUnmarshaler` interface:
```go
type duration struct {
time.Duration
}
func (d *duration) UnmarshalText(text []byte) error {
var err error
d.Duration, err = time.ParseDuration(string(text))
return err
}
```
### More complex usage
Here's an example of how to load the example from the official spec page:
```toml
# This is a TOML document. Boom.
title = "TOML Example"
[owner]
name = "Tom Preston-Werner"
organization = "GitHub"
bio = "GitHub Cofounder & CEO\nLikes tater tots and beer."
dob = 1979-05-27T07:32:00Z # First class dates? Why not?
[database]
server = "192.168.1.1"
ports = [ 8001, 8001, 8002 ]
connection_max = 5000
enabled = true
[servers]
# You can indent as you please. Tabs or spaces. TOML don't care.
[servers.alpha]
ip = "10.0.0.1"
dc = "eqdc10"
[servers.beta]
ip = "10.0.0.2"
dc = "eqdc10"
[clients]
data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it
# Line breaks are OK when inside arrays
hosts = [
"alpha",
"omega"
]
```
And the corresponding Go types are:
```go
type tomlConfig struct {
Title string
Owner ownerInfo
DB database `toml:"database"`
Servers map[string]server
Clients clients
}
type ownerInfo struct {
Name string
Org string `toml:"organization"`
Bio string
DOB time.Time
}
type database struct {
Server string
Ports []int
ConnMax int `toml:"connection_max"`
Enabled bool
}
type server struct {
IP string
DC string
}
type clients struct {
Data [][]interface{}
Hosts []string
}
```
Note that a case insensitive match will be tried if an exact match can't be
found.
A working example of the above can be found in `_examples/example.{go,toml}`.
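
To make the case-insensitive fallback concrete, here is a minimal sketch (not from the original README; the struct and key are invented):

```go
package main

import (
	"fmt"
	"log"

	"github.com/BurntSushi/toml"
)

type config struct {
	ServerName string // no `toml` tag and no exact key match below
}

func main() {
	// "SeRvErNaMe" matches no field or tag exactly, so the decoder falls
	// back to a case-insensitive comparison and fills ServerName anyway.
	var c config
	if _, err := toml.Decode(`SeRvErNaMe = "alpha"`, &c); err != nil {
		log.Fatal(err)
	}
	fmt.Println(c.ServerName) // alpha
}
```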


@@ -1,509 +0,0 @@
package toml
import (
"fmt"
"io"
"io/ioutil"
"math"
"reflect"
"strings"
"time"
)
func e(format string, args ...interface{}) error {
return fmt.Errorf("toml: "+format, args...)
}
// Unmarshaler is the interface implemented by objects that can unmarshal a
// TOML description of themselves.
type Unmarshaler interface {
UnmarshalTOML(interface{}) error
}
// Unmarshal decodes the contents of `p` in TOML format into a pointer `v`.
func Unmarshal(p []byte, v interface{}) error {
_, err := Decode(string(p), v)
return err
}
// Primitive is a TOML value that hasn't been decoded into a Go value.
// When using the various `Decode*` functions, the type `Primitive` may
// be given to any value, and its decoding will be delayed.
//
// A `Primitive` value can be decoded using the `PrimitiveDecode` function.
//
// The underlying representation of a `Primitive` value is subject to change.
// Do not rely on it.
//
// N.B. Primitive values are still parsed, so using them will only avoid
// the overhead of reflection. They can be useful when you don't know the
// exact type of TOML data until run time.
type Primitive struct {
undecoded interface{}
context Key
}
// DEPRECATED!
//
// Use MetaData.PrimitiveDecode instead.
func PrimitiveDecode(primValue Primitive, v interface{}) error {
md := MetaData{decoded: make(map[string]bool)}
return md.unify(primValue.undecoded, rvalue(v))
}
// PrimitiveDecode is just like the other `Decode*` functions, except it
// decodes a TOML value that has already been parsed. Valid primitive values
// can *only* be obtained from values filled by the decoder functions,
// including this method. (i.e., `v` may contain more `Primitive`
// values.)
//
// Meta data for primitive values is included in the meta data returned by
// the `Decode*` functions with one exception: keys returned by the Undecoded
// method will only reflect keys that were decoded. Namely, any keys hidden
// behind a Primitive will be considered undecoded. Executing this method will
// update the undecoded keys in the meta data. (See the example.)
func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error {
md.context = primValue.context
defer func() { md.context = nil }()
return md.unify(primValue.undecoded, rvalue(v))
}
// Decode will decode the contents of `data` in TOML format into a pointer
// `v`.
//
// TOML hashes correspond to Go structs or maps. (Dealer's choice. They can be
// used interchangeably.)
//
// TOML arrays of tables correspond to either a slice of structs or a slice
// of maps.
//
// TOML datetimes correspond to Go `time.Time` values.
//
// All other TOML types (float, string, int, bool and array) correspond
// to the obvious Go types.
//
// An exception to the above rules is if a type implements the
// encoding.TextUnmarshaler interface. In this case, any primitive TOML value
// (floats, strings, integers, booleans and datetimes) will be converted to
// a byte string and given to the value's UnmarshalText method. See the
// Unmarshaler example for a demonstration with time duration strings.
//
// Key mapping
//
// TOML keys can map to either keys in a Go map or field names in a Go
// struct. The special `toml` struct tag may be used to map TOML keys to
// struct fields that don't match the key name exactly. (See the example.)
// A case insensitive match to struct names will be tried if an exact match
// can't be found.
//
// The mapping between TOML values and Go values is loose. That is, there
// may exist TOML values that cannot be placed into your representation, and
// there may be parts of your representation that do not correspond to
// TOML values. This loose mapping can be made stricter by using the IsDefined
// and/or Undecoded methods on the MetaData returned.
//
// This decoder will not handle cyclic types. If a cyclic type is passed,
// `Decode` will not terminate.
func Decode(data string, v interface{}) (MetaData, error) {
rv := reflect.ValueOf(v)
if rv.Kind() != reflect.Ptr {
return MetaData{}, e("Decode of non-pointer %s", reflect.TypeOf(v))
}
if rv.IsNil() {
return MetaData{}, e("Decode of nil %s", reflect.TypeOf(v))
}
p, err := parse(data)
if err != nil {
return MetaData{}, err
}
md := MetaData{
p.mapping, p.types, p.ordered,
make(map[string]bool, len(p.ordered)), nil,
}
return md, md.unify(p.mapping, indirect(rv))
}
// DecodeFile is just like Decode, except it will automatically read the
// contents of the file at `fpath` and decode it for you.
func DecodeFile(fpath string, v interface{}) (MetaData, error) {
bs, err := ioutil.ReadFile(fpath)
if err != nil {
return MetaData{}, err
}
return Decode(string(bs), v)
}
// DecodeReader is just like Decode, except it will consume all bytes
// from the reader and decode it for you.
func DecodeReader(r io.Reader, v interface{}) (MetaData, error) {
bs, err := ioutil.ReadAll(r)
if err != nil {
return MetaData{}, err
}
return Decode(string(bs), v)
}
// unify performs a sort of type unification based on the structure of `rv`,
// which is the client representation.
//
// Any type mismatch produces an error. Finding a type that we don't know
// how to handle produces an unsupported type error.
func (md *MetaData) unify(data interface{}, rv reflect.Value) error {
// Special case. Look for a `Primitive` value.
if rv.Type() == reflect.TypeOf((*Primitive)(nil)).Elem() {
// Save the undecoded data and the key context into the primitive
// value.
context := make(Key, len(md.context))
copy(context, md.context)
rv.Set(reflect.ValueOf(Primitive{
undecoded: data,
context: context,
}))
return nil
}
// Special case. Unmarshaler Interface support.
if rv.CanAddr() {
if v, ok := rv.Addr().Interface().(Unmarshaler); ok {
return v.UnmarshalTOML(data)
}
}
// Special case. Handle time.Time values specifically.
// TODO: Remove this code when we decide to drop support for Go 1.1.
// This isn't necessary in Go 1.2 because time.Time satisfies the encoding
// interfaces.
if rv.Type().AssignableTo(rvalue(time.Time{}).Type()) {
return md.unifyDatetime(data, rv)
}
// Special case. Look for a value satisfying the TextUnmarshaler interface.
if v, ok := rv.Interface().(TextUnmarshaler); ok {
return md.unifyText(data, v)
}
// BUG(burntsushi)
// The behavior here is incorrect whenever a Go type satisfies the
// encoding.TextUnmarshaler interface but also corresponds to a TOML
// hash or array. In particular, the unmarshaler should only be applied
// to primitive TOML values. But at this point, it will be applied to
// all kinds of values and produce an incorrect error whenever those values
// are hashes or arrays (including arrays of tables).
k := rv.Kind()
// laziness
if k >= reflect.Int && k <= reflect.Uint64 {
return md.unifyInt(data, rv)
}
switch k {
case reflect.Ptr:
elem := reflect.New(rv.Type().Elem())
err := md.unify(data, reflect.Indirect(elem))
if err != nil {
return err
}
rv.Set(elem)
return nil
case reflect.Struct:
return md.unifyStruct(data, rv)
case reflect.Map:
return md.unifyMap(data, rv)
case reflect.Array:
return md.unifyArray(data, rv)
case reflect.Slice:
return md.unifySlice(data, rv)
case reflect.String:
return md.unifyString(data, rv)
case reflect.Bool:
return md.unifyBool(data, rv)
case reflect.Interface:
// we only support empty interfaces.
if rv.NumMethod() > 0 {
return e("unsupported type %s", rv.Type())
}
return md.unifyAnything(data, rv)
case reflect.Float32:
fallthrough
case reflect.Float64:
return md.unifyFloat64(data, rv)
}
return e("unsupported type %s", rv.Kind())
}
func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error {
tmap, ok := mapping.(map[string]interface{})
if !ok {
if mapping == nil {
return nil
}
return e("type mismatch for %s: expected table but found %T",
rv.Type().String(), mapping)
}
for key, datum := range tmap {
var f *field
fields := cachedTypeFields(rv.Type())
for i := range fields {
ff := &fields[i]
if ff.name == key {
f = ff
break
}
if f == nil && strings.EqualFold(ff.name, key) {
f = ff
}
}
if f != nil {
subv := rv
for _, i := range f.index {
subv = indirect(subv.Field(i))
}
if isUnifiable(subv) {
md.decoded[md.context.add(key).String()] = true
md.context = append(md.context, key)
if err := md.unify(datum, subv); err != nil {
return err
}
md.context = md.context[0 : len(md.context)-1]
} else if f.name != "" {
// Bad user! No soup for you!
return e("cannot write unexported field %s.%s",
rv.Type().String(), f.name)
}
}
}
return nil
}
func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error {
tmap, ok := mapping.(map[string]interface{})
if !ok {
if tmap == nil {
return nil
}
return badtype("map", mapping)
}
if rv.IsNil() {
rv.Set(reflect.MakeMap(rv.Type()))
}
for k, v := range tmap {
md.decoded[md.context.add(k).String()] = true
md.context = append(md.context, k)
rvkey := indirect(reflect.New(rv.Type().Key()))
rvval := reflect.Indirect(reflect.New(rv.Type().Elem()))
if err := md.unify(v, rvval); err != nil {
return err
}
md.context = md.context[0 : len(md.context)-1]
rvkey.SetString(k)
rv.SetMapIndex(rvkey, rvval)
}
return nil
}
func (md *MetaData) unifyArray(data interface{}, rv reflect.Value) error {
datav := reflect.ValueOf(data)
if datav.Kind() != reflect.Slice {
if !datav.IsValid() {
return nil
}
return badtype("slice", data)
}
sliceLen := datav.Len()
if sliceLen != rv.Len() {
return e("expected array length %d; got TOML array of length %d",
rv.Len(), sliceLen)
}
return md.unifySliceArray(datav, rv)
}
func (md *MetaData) unifySlice(data interface{}, rv reflect.Value) error {
datav := reflect.ValueOf(data)
if datav.Kind() != reflect.Slice {
if !datav.IsValid() {
return nil
}
return badtype("slice", data)
}
n := datav.Len()
if rv.IsNil() || rv.Cap() < n {
rv.Set(reflect.MakeSlice(rv.Type(), n, n))
}
rv.SetLen(n)
return md.unifySliceArray(datav, rv)
}
func (md *MetaData) unifySliceArray(data, rv reflect.Value) error {
sliceLen := data.Len()
for i := 0; i < sliceLen; i++ {
v := data.Index(i).Interface()
sliceval := indirect(rv.Index(i))
if err := md.unify(v, sliceval); err != nil {
return err
}
}
return nil
}
func (md *MetaData) unifyDatetime(data interface{}, rv reflect.Value) error {
if _, ok := data.(time.Time); ok {
rv.Set(reflect.ValueOf(data))
return nil
}
return badtype("time.Time", data)
}
func (md *MetaData) unifyString(data interface{}, rv reflect.Value) error {
if s, ok := data.(string); ok {
rv.SetString(s)
return nil
}
return badtype("string", data)
}
func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error {
if num, ok := data.(float64); ok {
switch rv.Kind() {
case reflect.Float32:
fallthrough
case reflect.Float64:
rv.SetFloat(num)
default:
panic("bug")
}
return nil
}
return badtype("float", data)
}
func (md *MetaData) unifyInt(data interface{}, rv reflect.Value) error {
if num, ok := data.(int64); ok {
if rv.Kind() >= reflect.Int && rv.Kind() <= reflect.Int64 {
switch rv.Kind() {
case reflect.Int, reflect.Int64:
// No bounds checking necessary.
case reflect.Int8:
if num < math.MinInt8 || num > math.MaxInt8 {
return e("value %d is out of range for int8", num)
}
case reflect.Int16:
if num < math.MinInt16 || num > math.MaxInt16 {
return e("value %d is out of range for int16", num)
}
case reflect.Int32:
if num < math.MinInt32 || num > math.MaxInt32 {
return e("value %d is out of range for int32", num)
}
}
rv.SetInt(num)
} else if rv.Kind() >= reflect.Uint && rv.Kind() <= reflect.Uint64 {
unum := uint64(num)
switch rv.Kind() {
case reflect.Uint, reflect.Uint64:
// No bounds checking necessary.
case reflect.Uint8:
if num < 0 || unum > math.MaxUint8 {
return e("value %d is out of range for uint8", num)
}
case reflect.Uint16:
if num < 0 || unum > math.MaxUint16 {
return e("value %d is out of range for uint16", num)
}
case reflect.Uint32:
if num < 0 || unum > math.MaxUint32 {
return e("value %d is out of range for uint32", num)
}
}
rv.SetUint(unum)
} else {
panic("unreachable")
}
return nil
}
return badtype("integer", data)
}
func (md *MetaData) unifyBool(data interface{}, rv reflect.Value) error {
if b, ok := data.(bool); ok {
rv.SetBool(b)
return nil
}
return badtype("boolean", data)
}
func (md *MetaData) unifyAnything(data interface{}, rv reflect.Value) error {
rv.Set(reflect.ValueOf(data))
return nil
}
func (md *MetaData) unifyText(data interface{}, v TextUnmarshaler) error {
var s string
switch sdata := data.(type) {
case TextMarshaler:
text, err := sdata.MarshalText()
if err != nil {
return err
}
s = string(text)
case fmt.Stringer:
s = sdata.String()
case string:
s = sdata
case bool:
s = fmt.Sprintf("%v", sdata)
case int64:
s = fmt.Sprintf("%d", sdata)
case float64:
s = fmt.Sprintf("%f", sdata)
default:
return badtype("primitive (string-like)", data)
}
if err := v.UnmarshalText([]byte(s)); err != nil {
return err
}
return nil
}
// rvalue returns a reflect.Value of `v`. All pointers are resolved.
func rvalue(v interface{}) reflect.Value {
return indirect(reflect.ValueOf(v))
}
// indirect returns the value pointed to by a pointer.
// Pointers are followed until the value is not a pointer.
// New values are allocated for each nil pointer.
//
// An exception to this rule is if the value satisfies an interface of
// interest to us (like encoding.TextUnmarshaler).
func indirect(v reflect.Value) reflect.Value {
if v.Kind() != reflect.Ptr {
if v.CanSet() {
pv := v.Addr()
if _, ok := pv.Interface().(TextUnmarshaler); ok {
return pv
}
}
return v
}
if v.IsNil() {
v.Set(reflect.New(v.Type().Elem()))
}
return indirect(reflect.Indirect(v))
}
func isUnifiable(rv reflect.Value) bool {
if rv.CanSet() {
return true
}
if _, ok := rv.Interface().(TextUnmarshaler); ok {
return true
}
return false
}
func badtype(expected string, data interface{}) error {
return e("cannot load TOML value of type %T into a Go %s", data, expected)
}
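
A short usage sketch for `Primitive` and `MetaData.PrimitiveDecode` as documented above (illustrative only; the blob and type names are invented):

```go
package main

import (
	"fmt"
	"log"

	"github.com/BurntSushi/toml"
)

func main() {
	var root struct {
		Kind  string
		Extra toml.Primitive // decoding of this subtree is delayed
	}
	blob := `
Kind = "point"

[Extra]
x = 1
y = 2
`
	md, err := toml.Decode(blob, &root)
	if err != nil {
		log.Fatal(err)
	}

	// Now that root.Kind is known, pick a concrete type and decode the rest.
	var point struct{ X, Y int }
	if err := md.PrimitiveDecode(root.Extra, &point); err != nil {
		log.Fatal(err)
	}
	fmt.Println(point.X, point.Y) // 1 2
}
```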


@@ -1,121 +0,0 @@
package toml
import "strings"
// MetaData allows access to meta information about TOML data that may not
// be inferrable via reflection. In particular, whether a key has been defined
// and the TOML type of a key.
type MetaData struct {
mapping map[string]interface{}
types map[string]tomlType
keys []Key
decoded map[string]bool
context Key // Used only during decoding.
}
// IsDefined returns true if the key given exists in the TOML data. The key
// should be specified hierarchically. e.g.,
//
// // access the TOML key 'a.b.c'
// IsDefined("a", "b", "c")
//
// IsDefined will return false if an empty key is given. Keys are case sensitive.
func (md *MetaData) IsDefined(key ...string) bool {
if len(key) == 0 {
return false
}
var hash map[string]interface{}
var ok bool
var hashOrVal interface{} = md.mapping
for _, k := range key {
if hash, ok = hashOrVal.(map[string]interface{}); !ok {
return false
}
if hashOrVal, ok = hash[k]; !ok {
return false
}
}
return true
}
// Type returns a string representation of the type of the key specified.
//
// Type will return the empty string if given an empty key or a key that
// does not exist. Keys are case sensitive.
func (md *MetaData) Type(key ...string) string {
fullkey := strings.Join(key, ".")
if typ, ok := md.types[fullkey]; ok {
return typ.typeString()
}
return ""
}
// Key is the type of any TOML key, including key groups. Use (MetaData).Keys
// to get values of this type.
type Key []string
func (k Key) String() string {
return strings.Join(k, ".")
}
func (k Key) maybeQuotedAll() string {
var ss []string
for i := range k {
ss = append(ss, k.maybeQuoted(i))
}
return strings.Join(ss, ".")
}
func (k Key) maybeQuoted(i int) string {
quote := false
for _, c := range k[i] {
if !isBareKeyChar(c) {
quote = true
break
}
}
if quote {
return "\"" + strings.Replace(k[i], "\"", "\\\"", -1) + "\""
}
return k[i]
}
func (k Key) add(piece string) Key {
newKey := make(Key, len(k)+1)
copy(newKey, k)
newKey[len(k)] = piece
return newKey
}
// Keys returns a slice of every key in the TOML data, including key groups.
// Each key is itself a slice, where the first element is the top of the
// hierarchy and the last is the most specific.
//
// The list will have the same order as the keys appeared in the TOML data.
//
// All keys returned are non-empty.
func (md *MetaData) Keys() []Key {
return md.keys
}
// Undecoded returns all keys that have not been decoded in the order in which
// they appear in the original TOML document.
//
// This includes keys that haven't been decoded because of a Primitive value.
// Once the Primitive value is decoded, the keys will be considered decoded.
//
// Also note that decoding into an empty interface will result in no decoding,
// and so no keys will be considered decoded.
//
// In this sense, the Undecoded keys correspond to keys in the TOML document
// that do not have a concrete type in your representation.
func (md *MetaData) Undecoded() []Key {
undecoded := make([]Key, 0, len(md.keys))
for _, key := range md.keys {
if !md.decoded[key.String()] {
undecoded = append(undecoded, key)
}
}
return undecoded
}
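
A brief sketch of `Undecoded` in practice, e.g. warning about configuration keys the program never mapped (illustrative; the blob and type are invented):

```go
package main

import (
	"fmt"
	"log"

	"github.com/BurntSushi/toml"
)

func main() {
	var c struct{ Port int }
	// "host" has no corresponding field, so it stays undecoded.
	md, err := toml.Decode("Port = 8080\nhost = \"localhost\"\n", &c)
	if err != nil {
		log.Fatal(err)
	}
	for _, key := range md.Undecoded() {
		fmt.Printf("warning: unrecognized option %s\n", key)
	}
}
```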


@@ -1,27 +0,0 @@
/*
Package toml provides facilities for decoding and encoding TOML configuration
files via reflection. There is also support for delaying decoding with
the Primitive type, and querying the set of keys in a TOML document with the
MetaData type.
The specification implemented: https://github.com/toml-lang/toml
The sub-command github.com/BurntSushi/toml/cmd/tomlv can be used to verify
whether a file is a valid TOML document. It can also be used to print the
type of each key in a TOML document.
Testing
There are two important types of tests used for this package. The first is
contained inside '*_test.go' files and uses the standard Go unit testing
framework. These tests are primarily devoted to holistically testing the
decoder and encoder.
The second type of testing is used to verify the implementation's adherence
to the TOML specification. These tests have been factored into their own
project: https://github.com/BurntSushi/toml-test
The reason the tests are in a separate project is so that they can be used by
any implementation of TOML. Namely, it is language agnostic.
*/
package toml


@@ -1,568 +0,0 @@
package toml
import (
"bufio"
"errors"
"fmt"
"io"
"reflect"
"sort"
"strconv"
"strings"
"time"
)
type tomlEncodeError struct{ error }
var (
errArrayMixedElementTypes = errors.New(
"toml: cannot encode array with mixed element types")
errArrayNilElement = errors.New(
"toml: cannot encode array with nil element")
errNonString = errors.New(
"toml: cannot encode a map with non-string key type")
errAnonNonStruct = errors.New(
"toml: cannot encode an anonymous field that is not a struct")
errArrayNoTable = errors.New(
"toml: TOML array element cannot contain a table")
errNoKey = errors.New(
"toml: top-level values must be Go maps or structs")
errAnything = errors.New("") // used in testing
)
var quotedReplacer = strings.NewReplacer(
"\t", "\\t",
"\n", "\\n",
"\r", "\\r",
"\"", "\\\"",
"\\", "\\\\",
)
// Encoder encodes Go values into a TOML document written to an io.Writer.
//
// The indentation level can be controlled with the Indent field.
type Encoder struct {
// A single indentation level. By default it is two spaces.
Indent string
// hasWritten is whether we have written any output to w yet.
hasWritten bool
w *bufio.Writer
}
// NewEncoder returns a TOML encoder that encodes Go values to the io.Writer
// given. By default, a single indentation level is 2 spaces.
func NewEncoder(w io.Writer) *Encoder {
return &Encoder{
w: bufio.NewWriter(w),
Indent: " ",
}
}
// Encode writes a TOML representation of the Go value to the underlying
// io.Writer. If the value given cannot be encoded to a valid TOML document,
// then an error is returned.
//
// The mapping between Go values and TOML values should be precisely the same
// as for the Decode* functions. Similarly, the TextMarshaler interface is
// supported by encoding the resulting bytes as strings. (If you want to write
// arbitrary binary data then you will need to use something like base64 since
// TOML does not have any binary types.)
//
// When encoding TOML hashes (i.e., Go maps or structs), keys without any
// sub-hashes are encoded first.
//
// If a Go map is encoded, then its keys are sorted alphabetically for
// deterministic output. More control over this behavior may be provided if
// there is demand for it.
//
// Encoding Go values without a corresponding TOML representation---like map
// types with non-string keys---will cause an error to be returned. Similarly
// for mixed arrays/slices, arrays/slices with nil elements, embedded
// non-struct types and nested slices containing maps or structs.
// (e.g., [][]map[string]string is not allowed but []map[string]string is OK
// and so is []map[string][]string.)
func (enc *Encoder) Encode(v interface{}) error {
rv := eindirect(reflect.ValueOf(v))
if err := enc.safeEncode(Key([]string{}), rv); err != nil {
return err
}
return enc.w.Flush()
}
func (enc *Encoder) safeEncode(key Key, rv reflect.Value) (err error) {
defer func() {
if r := recover(); r != nil {
if terr, ok := r.(tomlEncodeError); ok {
err = terr.error
return
}
panic(r)
}
}()
enc.encode(key, rv)
return nil
}
func (enc *Encoder) encode(key Key, rv reflect.Value) {
// Special case. Time needs to be in ISO8601 format.
// Special case. If we can marshal the type to text, then we use that.
// Basically, this prevents the encoder from handling these types as
// generic structs (or whatever the underlying type of a TextMarshaler is).
switch rv.Interface().(type) {
case time.Time, TextMarshaler:
enc.keyEqElement(key, rv)
return
}
k := rv.Kind()
switch k {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
reflect.Int64,
reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32,
reflect.Uint64,
reflect.Float32, reflect.Float64, reflect.String, reflect.Bool:
enc.keyEqElement(key, rv)
case reflect.Array, reflect.Slice:
if typeEqual(tomlArrayHash, tomlTypeOfGo(rv)) {
enc.eArrayOfTables(key, rv)
} else {
enc.keyEqElement(key, rv)
}
case reflect.Interface:
if rv.IsNil() {
return
}
enc.encode(key, rv.Elem())
case reflect.Map:
if rv.IsNil() {
return
}
enc.eTable(key, rv)
case reflect.Ptr:
if rv.IsNil() {
return
}
enc.encode(key, rv.Elem())
case reflect.Struct:
enc.eTable(key, rv)
default:
panic(e("unsupported type for key '%s': %s", key, k))
}
}
// eElement encodes any value that can be an array element (primitives and
// arrays).
func (enc *Encoder) eElement(rv reflect.Value) {
switch v := rv.Interface().(type) {
case time.Time:
// Special case time.Time as a primitive. Has to come before
// TextMarshaler below because time.Time implements
// encoding.TextMarshaler, but we need to always use UTC.
enc.wf(v.UTC().Format("2006-01-02T15:04:05Z"))
return
case TextMarshaler:
// Special case. Use text marshaler if it's available for this value.
if s, err := v.MarshalText(); err != nil {
encPanic(err)
} else {
enc.writeQuoted(string(s))
}
return
}
switch rv.Kind() {
case reflect.Bool:
enc.wf(strconv.FormatBool(rv.Bool()))
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
reflect.Int64:
enc.wf(strconv.FormatInt(rv.Int(), 10))
case reflect.Uint, reflect.Uint8, reflect.Uint16,
reflect.Uint32, reflect.Uint64:
enc.wf(strconv.FormatUint(rv.Uint(), 10))
case reflect.Float32:
enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 32)))
case reflect.Float64:
enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 64)))
case reflect.Array, reflect.Slice:
enc.eArrayOrSliceElement(rv)
case reflect.Interface:
enc.eElement(rv.Elem())
case reflect.String:
enc.writeQuoted(rv.String())
default:
panic(e("unexpected primitive type: %s", rv.Kind()))
}
}
// By the TOML spec, all floats must have a decimal with at least one
// number on either side.
func floatAddDecimal(fstr string) string {
if !strings.Contains(fstr, ".") {
return fstr + ".0"
}
return fstr
}
func (enc *Encoder) writeQuoted(s string) {
enc.wf("\"%s\"", quotedReplacer.Replace(s))
}
func (enc *Encoder) eArrayOrSliceElement(rv reflect.Value) {
length := rv.Len()
enc.wf("[")
for i := 0; i < length; i++ {
elem := rv.Index(i)
enc.eElement(elem)
if i != length-1 {
enc.wf(", ")
}
}
enc.wf("]")
}
func (enc *Encoder) eArrayOfTables(key Key, rv reflect.Value) {
if len(key) == 0 {
encPanic(errNoKey)
}
for i := 0; i < rv.Len(); i++ {
trv := rv.Index(i)
if isNil(trv) {
continue
}
panicIfInvalidKey(key)
enc.newline()
enc.wf("%s[[%s]]", enc.indentStr(key), key.maybeQuotedAll())
enc.newline()
enc.eMapOrStruct(key, trv)
}
}
func (enc *Encoder) eTable(key Key, rv reflect.Value) {
panicIfInvalidKey(key)
if len(key) == 1 {
// Output an extra newline between top-level tables.
// (The newline isn't written if nothing else has been written though.)
enc.newline()
}
if len(key) > 0 {
enc.wf("%s[%s]", enc.indentStr(key), key.maybeQuotedAll())
enc.newline()
}
enc.eMapOrStruct(key, rv)
}
func (enc *Encoder) eMapOrStruct(key Key, rv reflect.Value) {
switch rv := eindirect(rv); rv.Kind() {
case reflect.Map:
enc.eMap(key, rv)
case reflect.Struct:
enc.eStruct(key, rv)
default:
panic("eTable: unhandled reflect.Value Kind: " + rv.Kind().String())
}
}
func (enc *Encoder) eMap(key Key, rv reflect.Value) {
rt := rv.Type()
if rt.Key().Kind() != reflect.String {
encPanic(errNonString)
}
// Sort keys so that we have deterministic output. And write keys directly
// underneath this key first, before writing sub-structs or sub-maps.
var mapKeysDirect, mapKeysSub []string
for _, mapKey := range rv.MapKeys() {
k := mapKey.String()
if typeIsHash(tomlTypeOfGo(rv.MapIndex(mapKey))) {
mapKeysSub = append(mapKeysSub, k)
} else {
mapKeysDirect = append(mapKeysDirect, k)
}
}
var writeMapKeys = func(mapKeys []string) {
sort.Strings(mapKeys)
for _, mapKey := range mapKeys {
mrv := rv.MapIndex(reflect.ValueOf(mapKey))
if isNil(mrv) {
// Don't write anything for nil fields.
continue
}
enc.encode(key.add(mapKey), mrv)
}
}
writeMapKeys(mapKeysDirect)
writeMapKeys(mapKeysSub)
}
func (enc *Encoder) eStruct(key Key, rv reflect.Value) {
// Write keys for fields directly under this key first, because if we write
// a field that creates a new table, then all keys under it will be in that
// table (not the one we're writing here).
rt := rv.Type()
var fieldsDirect, fieldsSub [][]int
var addFields func(rt reflect.Type, rv reflect.Value, start []int)
addFields = func(rt reflect.Type, rv reflect.Value, start []int) {
for i := 0; i < rt.NumField(); i++ {
f := rt.Field(i)
// skip unexported fields
if f.PkgPath != "" && !f.Anonymous {
continue
}
frv := rv.Field(i)
if f.Anonymous {
t := f.Type
switch t.Kind() {
case reflect.Struct:
// Treat anonymous struct fields with
// tag names as though they are not
// anonymous, like encoding/json does.
if getOptions(f.Tag).name == "" {
addFields(t, frv, f.Index)
continue
}
case reflect.Ptr:
if t.Elem().Kind() == reflect.Struct &&
getOptions(f.Tag).name == "" {
if !frv.IsNil() {
addFields(t.Elem(), frv.Elem(), f.Index)
}
continue
}
// Fall through to the normal field encoding logic below
// for non-struct anonymous fields.
}
}
if typeIsHash(tomlTypeOfGo(frv)) {
fieldsSub = append(fieldsSub, append(start, f.Index...))
} else {
fieldsDirect = append(fieldsDirect, append(start, f.Index...))
}
}
}
addFields(rt, rv, nil)
var writeFields = func(fields [][]int) {
for _, fieldIndex := range fields {
sft := rt.FieldByIndex(fieldIndex)
sf := rv.FieldByIndex(fieldIndex)
if isNil(sf) {
// Don't write anything for nil fields.
continue
}
opts := getOptions(sft.Tag)
if opts.skip {
continue
}
keyName := sft.Name
if opts.name != "" {
keyName = opts.name
}
if opts.omitempty && isEmpty(sf) {
continue
}
if opts.omitzero && isZero(sf) {
continue
}
enc.encode(key.add(keyName), sf)
}
}
writeFields(fieldsDirect)
writeFields(fieldsSub)
}
// tomlTypeOfGo returns the TOML type of a Go value. The type may be `nil`,
// which means no concrete TOML type could be found (e.g., for a nil value).
// It is used to determine whether the types of array elements are mixed,
// which is forbidden.
func tomlTypeOfGo(rv reflect.Value) tomlType {
if isNil(rv) || !rv.IsValid() {
return nil
}
switch rv.Kind() {
case reflect.Bool:
return tomlBool
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
reflect.Int64,
reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32,
reflect.Uint64:
return tomlInteger
case reflect.Float32, reflect.Float64:
return tomlFloat
case reflect.Array, reflect.Slice:
if typeEqual(tomlHash, tomlArrayType(rv)) {
return tomlArrayHash
}
return tomlArray
case reflect.Ptr, reflect.Interface:
return tomlTypeOfGo(rv.Elem())
case reflect.String:
return tomlString
case reflect.Map:
return tomlHash
case reflect.Struct:
switch rv.Interface().(type) {
case time.Time:
return tomlDatetime
case TextMarshaler:
return tomlString
default:
return tomlHash
}
default:
panic("unexpected reflect.Kind: " + rv.Kind().String())
}
}
// tomlArrayType returns the element type of a TOML array. The type returned
// may be nil if it cannot be determined (e.g., a nil slice or a zero length
// slice). This function may also panic if it finds a type that cannot be
// expressed in TOML (such as nil elements, heterogeneous arrays or directly
// nested arrays of tables).
func tomlArrayType(rv reflect.Value) tomlType {
if isNil(rv) || !rv.IsValid() || rv.Len() == 0 {
return nil
}
firstType := tomlTypeOfGo(rv.Index(0))
if firstType == nil {
encPanic(errArrayNilElement)
}
rvlen := rv.Len()
for i := 1; i < rvlen; i++ {
elem := rv.Index(i)
switch elemType := tomlTypeOfGo(elem); {
case elemType == nil:
encPanic(errArrayNilElement)
case !typeEqual(firstType, elemType):
encPanic(errArrayMixedElementTypes)
}
}
// If we have a nested array, then we must make sure that the nested
// array contains ONLY primitives.
// This checks arbitrarily nested arrays.
if typeEqual(firstType, tomlArray) || typeEqual(firstType, tomlArrayHash) {
nest := tomlArrayType(eindirect(rv.Index(0)))
if typeEqual(nest, tomlHash) || typeEqual(nest, tomlArrayHash) {
encPanic(errArrayNoTable)
}
}
return firstType
}
type tagOptions struct {
skip bool // "-"
name string
omitempty bool
omitzero bool
}
func getOptions(tag reflect.StructTag) tagOptions {
t := tag.Get("toml")
if t == "-" {
return tagOptions{skip: true}
}
var opts tagOptions
parts := strings.Split(t, ",")
opts.name = parts[0]
for _, s := range parts[1:] {
switch s {
case "omitempty":
opts.omitempty = true
case "omitzero":
opts.omitzero = true
}
}
return opts
}
func isZero(rv reflect.Value) bool {
switch rv.Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return rv.Int() == 0
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
return rv.Uint() == 0
case reflect.Float32, reflect.Float64:
return rv.Float() == 0.0
}
return false
}
func isEmpty(rv reflect.Value) bool {
switch rv.Kind() {
case reflect.Array, reflect.Slice, reflect.Map, reflect.String:
return rv.Len() == 0
case reflect.Bool:
return !rv.Bool()
}
return false
}
func (enc *Encoder) newline() {
if enc.hasWritten {
enc.wf("\n")
}
}
func (enc *Encoder) keyEqElement(key Key, val reflect.Value) {
if len(key) == 0 {
encPanic(errNoKey)
}
panicIfInvalidKey(key)
enc.wf("%s%s = ", enc.indentStr(key), key.maybeQuoted(len(key)-1))
enc.eElement(val)
enc.newline()
}
func (enc *Encoder) wf(format string, v ...interface{}) {
if _, err := fmt.Fprintf(enc.w, format, v...); err != nil {
encPanic(err)
}
enc.hasWritten = true
}
func (enc *Encoder) indentStr(key Key) string {
return strings.Repeat(enc.Indent, len(key)-1)
}
func encPanic(err error) {
panic(tomlEncodeError{err})
}
func eindirect(v reflect.Value) reflect.Value {
switch v.Kind() {
case reflect.Ptr, reflect.Interface:
return eindirect(v.Elem())
default:
return v
}
}
func isNil(rv reflect.Value) bool {
switch rv.Kind() {
case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
return rv.IsNil()
default:
return false
}
}
func panicIfInvalidKey(key Key) {
for _, k := range key {
if len(k) == 0 {
encPanic(e("Key '%s' is not a valid table name. Key names "+
"cannot be empty.", key.maybeQuotedAll()))
}
}
}
func isValidKeyName(s string) bool {
return len(s) != 0
}
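
A small usage sketch for `Encoder` and its `Indent` field, as described above (illustrative only):

```go
package main

import (
	"log"
	"os"

	"github.com/BurntSushi/toml"
)

func main() {
	cfg := struct {
		Title string
		Owner struct{ Name string }
	}{Title: "example"}
	cfg.Owner.Name = "tom"

	enc := toml.NewEncoder(os.Stdout)
	enc.Indent = "    " // four spaces instead of the default two
	if err := enc.Encode(cfg); err != nil {
		log.Fatal(err)
	}
	// Output:
	// Title = "example"
	//
	// [Owner]
	//     Name = "tom"
}
```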


@@ -1,19 +0,0 @@
// +build go1.2

package toml
// In order to support Go 1.1, we define our own TextMarshaler and
// TextUnmarshaler types. For Go 1.2+, we just alias them with the
// standard library interfaces.
import (
"encoding"
)
// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here
// so that Go 1.1 can be supported.
type TextMarshaler encoding.TextMarshaler
// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined
// here so that Go 1.1 can be supported.
type TextUnmarshaler encoding.TextUnmarshaler
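
Since these aliases exist so user types can plug into both encoding directions, here is a hedged round-trip sketch (the `duration` type is invented for illustration):

```go
package main

import (
	"log"
	"os"
	"time"

	"github.com/BurntSushi/toml"
)

// duration satisfies both toml.TextUnmarshaler and toml.TextMarshaler,
// so it decodes from and re-encodes to a string such as "1m30s".
type duration struct{ time.Duration }

func (d *duration) UnmarshalText(text []byte) error {
	var err error
	d.Duration, err = time.ParseDuration(string(text))
	return err
}

func (d duration) MarshalText() ([]byte, error) {
	return []byte(d.Duration.String()), nil
}

func main() {
	var v struct{ Timeout duration }
	if _, err := toml.Decode(`Timeout = "1m30s"`, &v); err != nil {
		log.Fatal(err)
	}
	// Re-encoding goes through MarshalText and prints: Timeout = "1m30s"
	if err := toml.NewEncoder(os.Stdout).Encode(v); err != nil {
		log.Fatal(err)
	}
}
```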


@@ -1,18 +0,0 @@
// +build !go1.2

package toml
// These interfaces were introduced in Go 1.2, so we add them manually when
// compiling for Go 1.1.
// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here
// so that Go 1.1 can be supported.
type TextMarshaler interface {
MarshalText() (text []byte, err error)
}
// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined
// here so that Go 1.1 can be supported.
type TextUnmarshaler interface {
UnmarshalText(text []byte) error
}


@@ -1,953 +0,0 @@
package toml
import (
"fmt"
"strings"
"unicode"
"unicode/utf8"
)
type itemType int
const (
itemError itemType = iota
itemNIL // used in the parser to indicate no type
itemEOF
itemText
itemString
itemRawString
itemMultilineString
itemRawMultilineString
itemBool
itemInteger
itemFloat
itemDatetime
itemArray // the start of an array
itemArrayEnd
itemTableStart
itemTableEnd
itemArrayTableStart
itemArrayTableEnd
itemKeyStart
itemCommentStart
itemInlineTableStart
itemInlineTableEnd
)
const (
eof = 0
comma = ','
tableStart = '['
tableEnd = ']'
arrayTableStart = '['
arrayTableEnd = ']'
tableSep = '.'
keySep = '='
arrayStart = '['
arrayEnd = ']'
commentStart = '#'
stringStart = '"'
stringEnd = '"'
rawStringStart = '\''
rawStringEnd = '\''
inlineTableStart = '{'
inlineTableEnd = '}'
)
type stateFn func(lx *lexer) stateFn
type lexer struct {
input string
start int
pos int
line int
state stateFn
items chan item
// Allow for backing up up to three runes.
// This is necessary because TOML contains 3-rune tokens (""" and ''').
prevWidths [3]int
nprev int // how many of prevWidths are in use
// If we emit an eof, we can still back up, but it is not OK to call
// next again.
atEOF bool
// A stack of state functions used to maintain context.
// The idea is to reuse parts of the state machine in various places.
// For example, values can appear at the top level or within arbitrarily
// nested arrays. The last state on the stack is used after a value has
// been lexed. Similarly for comments.
stack []stateFn
}
type item struct {
typ itemType
val string
line int
}
func (lx *lexer) nextItem() item {
for {
select {
case item := <-lx.items:
return item
default:
lx.state = lx.state(lx)
}
}
}
func lex(input string) *lexer {
lx := &lexer{
input: input,
state: lexTop,
line: 1,
items: make(chan item, 10),
stack: make([]stateFn, 0, 10),
}
return lx
}
func (lx *lexer) push(state stateFn) {
lx.stack = append(lx.stack, state)
}
func (lx *lexer) pop() stateFn {
if len(lx.stack) == 0 {
return lx.errorf("BUG in lexer: no states to pop")
}
last := lx.stack[len(lx.stack)-1]
lx.stack = lx.stack[0 : len(lx.stack)-1]
return last
}
func (lx *lexer) current() string {
return lx.input[lx.start:lx.pos]
}
func (lx *lexer) emit(typ itemType) {
lx.items <- item{typ, lx.current(), lx.line}
lx.start = lx.pos
}
func (lx *lexer) emitTrim(typ itemType) {
lx.items <- item{typ, strings.TrimSpace(lx.current()), lx.line}
lx.start = lx.pos
}
func (lx *lexer) next() (r rune) {
if lx.atEOF {
panic("next called after EOF")
}
if lx.pos >= len(lx.input) {
lx.atEOF = true
return eof
}
if lx.input[lx.pos] == '\n' {
lx.line++
}
lx.prevWidths[2] = lx.prevWidths[1]
lx.prevWidths[1] = lx.prevWidths[0]
if lx.nprev < 3 {
lx.nprev++
}
r, w := utf8.DecodeRuneInString(lx.input[lx.pos:])
lx.prevWidths[0] = w
lx.pos += w
return r
}
// ignore skips over the pending input before this point.
func (lx *lexer) ignore() {
lx.start = lx.pos
}
// backup steps back one rune. Can be called only twice between calls to next.
func (lx *lexer) backup() {
if lx.atEOF {
lx.atEOF = false
return
}
if lx.nprev < 1 {
panic("backed up too far")
}
w := lx.prevWidths[0]
lx.prevWidths[0] = lx.prevWidths[1]
lx.prevWidths[1] = lx.prevWidths[2]
lx.nprev--
lx.pos -= w
if lx.pos < len(lx.input) && lx.input[lx.pos] == '\n' {
lx.line--
}
}
// accept consumes the next rune if it's equal to `valid`.
func (lx *lexer) accept(valid rune) bool {
if lx.next() == valid {
return true
}
lx.backup()
return false
}
// peek returns but does not consume the next rune in the input.
func (lx *lexer) peek() rune {
r := lx.next()
lx.backup()
return r
}
// skip ignores all input that matches the given predicate.
func (lx *lexer) skip(pred func(rune) bool) {
for {
r := lx.next()
if pred(r) {
continue
}
lx.backup()
lx.ignore()
return
}
}
// errorf stops all lexing by emitting an error and returning `nil`.
// Note that any value that is a character is escaped if it's a special
// character (newlines, tabs, etc.).
func (lx *lexer) errorf(format string, values ...interface{}) stateFn {
lx.items <- item{
itemError,
fmt.Sprintf(format, values...),
lx.line,
}
return nil
}
// lexTop consumes elements at the top level of TOML data.
func lexTop(lx *lexer) stateFn {
r := lx.next()
if isWhitespace(r) || isNL(r) {
return lexSkip(lx, lexTop)
}
switch r {
case commentStart:
lx.push(lexTop)
return lexCommentStart
case tableStart:
return lexTableStart
case eof:
if lx.pos > lx.start {
return lx.errorf("unexpected EOF")
}
lx.emit(itemEOF)
return nil
}
// At this point, the only valid item can be a key, so we back up
// and let the key lexer do the rest.
lx.backup()
lx.push(lexTopEnd)
return lexKeyStart
}
// lexTopEnd is entered whenever a top-level item has been consumed. (A value
// or a table.) It must see only whitespace, and will turn back to lexTop
// upon a newline. If it sees EOF, it will quit the lexer successfully.
func lexTopEnd(lx *lexer) stateFn {
r := lx.next()
switch {
case r == commentStart:
// a comment will read to a newline for us.
lx.push(lexTop)
return lexCommentStart
case isWhitespace(r):
return lexTopEnd
case isNL(r):
lx.ignore()
return lexTop
case r == eof:
lx.emit(itemEOF)
return nil
}
return lx.errorf("expected a top-level item to end with a newline, "+
"comment, or EOF, but got %q instead", r)
}
// lexTable lexes the beginning of a table. Namely, it makes sure that
// it starts with a character other than '.' and ']'.
// It assumes that '[' has already been consumed.
// It also handles the case that this is an item in an array of tables.
// e.g., '[[name]]'.
func lexTableStart(lx *lexer) stateFn {
if lx.peek() == arrayTableStart {
lx.next()
lx.emit(itemArrayTableStart)
lx.push(lexArrayTableEnd)
} else {
lx.emit(itemTableStart)
lx.push(lexTableEnd)
}
return lexTableNameStart
}
func lexTableEnd(lx *lexer) stateFn {
lx.emit(itemTableEnd)
return lexTopEnd
}
func lexArrayTableEnd(lx *lexer) stateFn {
if r := lx.next(); r != arrayTableEnd {
return lx.errorf("expected end of table array name delimiter %q, "+
"but got %q instead", arrayTableEnd, r)
}
lx.emit(itemArrayTableEnd)
return lexTopEnd
}
func lexTableNameStart(lx *lexer) stateFn {
lx.skip(isWhitespace)
switch r := lx.peek(); {
case r == tableEnd || r == eof:
return lx.errorf("unexpected end of table name " +
"(table names cannot be empty)")
case r == tableSep:
return lx.errorf("unexpected table separator " +
"(table names cannot be empty)")
case r == stringStart || r == rawStringStart:
lx.ignore()
lx.push(lexTableNameEnd)
return lexValue // reuse string lexing
default:
return lexBareTableName
}
}
// lexBareTableName lexes the name of a table. It assumes that at least one
// valid character for the table has already been read.
func lexBareTableName(lx *lexer) stateFn {
r := lx.next()
if isBareKeyChar(r) {
return lexBareTableName
}
lx.backup()
lx.emit(itemText)
return lexTableNameEnd
}
// lexTableNameEnd reads the end of a piece of a table name, optionally
// consuming whitespace.
func lexTableNameEnd(lx *lexer) stateFn {
lx.skip(isWhitespace)
switch r := lx.next(); {
case isWhitespace(r):
return lexTableNameEnd
case r == tableSep:
lx.ignore()
return lexTableNameStart
case r == tableEnd:
return lx.pop()
default:
return lx.errorf("expected '.' or ']' to end table name, "+
"but got %q instead", r)
}
}
// lexKeyStart consumes a key name up until the first non-whitespace character.
// lexKeyStart will ignore whitespace.
func lexKeyStart(lx *lexer) stateFn {
r := lx.peek()
switch {
case r == keySep:
return lx.errorf("unexpected key separator %q", keySep)
case isWhitespace(r) || isNL(r):
lx.next()
return lexSkip(lx, lexKeyStart)
case r == stringStart || r == rawStringStart:
lx.ignore()
lx.emit(itemKeyStart)
lx.push(lexKeyEnd)
return lexValue // reuse string lexing
default:
lx.ignore()
lx.emit(itemKeyStart)
return lexBareKey
}
}
// lexBareKey consumes the text of a bare key. Assumes that the first character
// (which is not whitespace) has not yet been consumed.
func lexBareKey(lx *lexer) stateFn {
switch r := lx.next(); {
case isBareKeyChar(r):
return lexBareKey
case isWhitespace(r):
lx.backup()
lx.emit(itemText)
return lexKeyEnd
case r == keySep:
lx.backup()
lx.emit(itemText)
return lexKeyEnd
default:
return lx.errorf("bare keys cannot contain %q", r)
}
}
// lexKeyEnd consumes the end of a key and trims whitespace (up to the key
// separator).
func lexKeyEnd(lx *lexer) stateFn {
switch r := lx.next(); {
case r == keySep:
return lexSkip(lx, lexValue)
case isWhitespace(r):
return lexSkip(lx, lexKeyEnd)
default:
return lx.errorf("expected key separator %q, but got %q instead",
keySep, r)
}
}
// lexValue starts the consumption of a value anywhere a value is expected.
// lexValue will ignore whitespace.
// After a value is lexed, the last state on the stack is popped and returned.
func lexValue(lx *lexer) stateFn {
// We allow whitespace to precede a value, but NOT newlines.
// In array syntax, the array states are responsible for ignoring newlines.
r := lx.next()
switch {
case isWhitespace(r):
return lexSkip(lx, lexValue)
case isDigit(r):
lx.backup() // avoid an extra state and use the same as above
return lexNumberOrDateStart
}
switch r {
case arrayStart:
lx.ignore()
lx.emit(itemArray)
return lexArrayValue
case inlineTableStart:
lx.ignore()
lx.emit(itemInlineTableStart)
return lexInlineTableValue
case stringStart:
if lx.accept(stringStart) {
if lx.accept(stringStart) {
lx.ignore() // Ignore """
return lexMultilineString
}
lx.backup()
}
lx.ignore() // ignore the '"'
return lexString
case rawStringStart:
if lx.accept(rawStringStart) {
if lx.accept(rawStringStart) {
lx.ignore() // Ignore '''
return lexMultilineRawString
}
lx.backup()
}
lx.ignore() // ignore the "'"
return lexRawString
case '+', '-':
return lexNumberStart
case '.': // special error case, be kind to users
return lx.errorf("floats must start with a digit, not '.'")
}
if unicode.IsLetter(r) {
// Be permissive here; lexBool will give a nice error if the
// user wrote something like
// x = foo
// (i.e. not 'true' or 'false' but is something else word-like.)
lx.backup()
return lexBool
}
return lx.errorf("expected value but found %q instead", r)
}
// lexArrayValue consumes one value in an array. It assumes that '[' or ','
// have already been consumed. All whitespace and newlines are ignored.
func lexArrayValue(lx *lexer) stateFn {
r := lx.next()
switch {
case isWhitespace(r) || isNL(r):
return lexSkip(lx, lexArrayValue)
case r == commentStart:
lx.push(lexArrayValue)
return lexCommentStart
case r == comma:
return lx.errorf("unexpected comma")
case r == arrayEnd:
// NOTE(caleb): The spec isn't clear about whether you can have
// a trailing comma or not, so we'll allow it.
return lexArrayEnd
}
lx.backup()
lx.push(lexArrayValueEnd)
return lexValue
}
// lexArrayValueEnd consumes everything between the end of an array value and
// the next value (or the end of the array): it ignores whitespace and newlines
// and expects either a ',' or a ']'.
func lexArrayValueEnd(lx *lexer) stateFn {
r := lx.next()
switch {
case isWhitespace(r) || isNL(r):
return lexSkip(lx, lexArrayValueEnd)
case r == commentStart:
lx.push(lexArrayValueEnd)
return lexCommentStart
case r == comma:
lx.ignore()
return lexArrayValue // move on to the next value
case r == arrayEnd:
return lexArrayEnd
}
return lx.errorf(
"expected a comma or array terminator %q, but got %q instead",
arrayEnd, r,
)
}
// lexArrayEnd finishes the lexing of an array.
// It assumes that a ']' has just been consumed.
func lexArrayEnd(lx *lexer) stateFn {
lx.ignore()
lx.emit(itemArrayEnd)
return lx.pop()
}
// lexInlineTableValue consumes one key/value pair in an inline table.
// It assumes that '{' or ',' have already been consumed. Whitespace is ignored.
func lexInlineTableValue(lx *lexer) stateFn {
r := lx.next()
switch {
case isWhitespace(r):
return lexSkip(lx, lexInlineTableValue)
case isNL(r):
return lx.errorf("newlines not allowed within inline tables")
case r == commentStart:
lx.push(lexInlineTableValue)
return lexCommentStart
case r == comma:
return lx.errorf("unexpected comma")
case r == inlineTableEnd:
return lexInlineTableEnd
}
lx.backup()
lx.push(lexInlineTableValueEnd)
return lexKeyStart
}
// lexInlineTableValueEnd consumes everything between the end of an inline table
// key/value pair and the next pair (or the end of the table):
// it ignores whitespace and expects either a ',' or a '}'.
func lexInlineTableValueEnd(lx *lexer) stateFn {
r := lx.next()
switch {
case isWhitespace(r):
return lexSkip(lx, lexInlineTableValueEnd)
case isNL(r):
return lx.errorf("newlines not allowed within inline tables")
case r == commentStart:
lx.push(lexInlineTableValueEnd)
return lexCommentStart
case r == comma:
lx.ignore()
return lexInlineTableValue
case r == inlineTableEnd:
return lexInlineTableEnd
}
return lx.errorf("expected a comma or an inline table terminator %q, "+
"but got %q instead", inlineTableEnd, r)
}
// lexInlineTableEnd finishes the lexing of an inline table.
// It assumes that a '}' has just been consumed.
func lexInlineTableEnd(lx *lexer) stateFn {
lx.ignore()
lx.emit(itemInlineTableEnd)
return lx.pop()
}
// lexString consumes the inner contents of a string. It assumes that the
// beginning '"' has already been consumed and ignored.
func lexString(lx *lexer) stateFn {
r := lx.next()
switch {
case r == eof:
return lx.errorf("unexpected EOF")
case isNL(r):
return lx.errorf("strings cannot contain newlines")
case r == '\\':
lx.push(lexString)
return lexStringEscape
case r == stringEnd:
lx.backup()
lx.emit(itemString)
lx.next()
lx.ignore()
return lx.pop()
}
return lexString
}
// lexMultilineString consumes the inner contents of a string. It assumes that
// the beginning '"""' has already been consumed and ignored.
func lexMultilineString(lx *lexer) stateFn {
switch lx.next() {
case eof:
return lx.errorf("unexpected EOF")
case '\\':
return lexMultilineStringEscape
case stringEnd:
if lx.accept(stringEnd) {
if lx.accept(stringEnd) {
lx.backup()
lx.backup()
lx.backup()
lx.emit(itemMultilineString)
lx.next()
lx.next()
lx.next()
lx.ignore()
return lx.pop()
}
lx.backup()
}
}
return lexMultilineString
}
// lexRawString consumes a raw string. Nothing can be escaped in such a string.
// It assumes that the beginning "'" has already been consumed and ignored.
func lexRawString(lx *lexer) stateFn {
r := lx.next()
switch {
case r == eof:
return lx.errorf("unexpected EOF")
case isNL(r):
return lx.errorf("strings cannot contain newlines")
case r == rawStringEnd:
lx.backup()
lx.emit(itemRawString)
lx.next()
lx.ignore()
return lx.pop()
}
return lexRawString
}
// lexMultilineRawString consumes a raw string. Nothing can be escaped in such
// a string. It assumes that the beginning "'''" has already been consumed and
// ignored.
func lexMultilineRawString(lx *lexer) stateFn {
switch lx.next() {
case eof:
return lx.errorf("unexpected EOF")
case rawStringEnd:
if lx.accept(rawStringEnd) {
if lx.accept(rawStringEnd) {
lx.backup()
lx.backup()
lx.backup()
lx.emit(itemRawMultilineString)
lx.next()
lx.next()
lx.next()
lx.ignore()
return lx.pop()
}
lx.backup()
}
}
return lexMultilineRawString
}
// lexMultilineStringEscape consumes an escaped character. It assumes that the
// preceding '\\' has already been consumed.
func lexMultilineStringEscape(lx *lexer) stateFn {
// Handle the special case first:
if isNL(lx.next()) {
return lexMultilineString
}
lx.backup()
lx.push(lexMultilineString)
return lexStringEscape(lx)
}
func lexStringEscape(lx *lexer) stateFn {
r := lx.next()
switch r {
case 'b':
fallthrough
case 't':
fallthrough
case 'n':
fallthrough
case 'f':
fallthrough
case 'r':
fallthrough
case '"':
fallthrough
case '\\':
return lx.pop()
case 'u':
return lexShortUnicodeEscape
case 'U':
return lexLongUnicodeEscape
}
return lx.errorf("invalid escape character %q; only the following "+
"escape characters are allowed: "+
`\b, \t, \n, \f, \r, \", \\, \uXXXX, and \UXXXXXXXX`, r)
}
func lexShortUnicodeEscape(lx *lexer) stateFn {
var r rune
for i := 0; i < 4; i++ {
r = lx.next()
if !isHexadecimal(r) {
return lx.errorf(`expected four hexadecimal digits after '\u', `+
"but got %q instead", lx.current())
}
}
return lx.pop()
}
func lexLongUnicodeEscape(lx *lexer) stateFn {
var r rune
for i := 0; i < 8; i++ {
r = lx.next()
if !isHexadecimal(r) {
return lx.errorf(`expected eight hexadecimal digits after '\U', `+
"but got %q instead", lx.current())
}
}
return lx.pop()
}
// lexNumberOrDateStart consumes either an integer, a float, or datetime.
func lexNumberOrDateStart(lx *lexer) stateFn {
r := lx.next()
if isDigit(r) {
return lexNumberOrDate
}
switch r {
case '_':
return lexNumber
case 'e', 'E':
return lexFloat
case '.':
return lx.errorf("floats must start with a digit, not '.'")
}
return lx.errorf("expected a digit but got %q", r)
}
// lexNumberOrDate consumes either an integer, float or datetime.
func lexNumberOrDate(lx *lexer) stateFn {
r := lx.next()
if isDigit(r) {
return lexNumberOrDate
}
switch r {
case '-':
return lexDatetime
case '_':
return lexNumber
case '.', 'e', 'E':
return lexFloat
}
lx.backup()
lx.emit(itemInteger)
return lx.pop()
}
// lexDatetime consumes a Datetime, to a first approximation.
// The parser validates that it matches one of the accepted formats.
func lexDatetime(lx *lexer) stateFn {
r := lx.next()
if isDigit(r) {
return lexDatetime
}
switch r {
case '-', 'T', ':', '.', 'Z', '+':
return lexDatetime
}
lx.backup()
lx.emit(itemDatetime)
return lx.pop()
}
// lexNumberStart consumes either an integer or a float. It assumes that a sign
// has already been read, but that *no* digits have been consumed.
// lexNumberStart will move to the appropriate integer or float states.
func lexNumberStart(lx *lexer) stateFn {
// We MUST see a digit. Even floats have to start with a digit.
r := lx.next()
if !isDigit(r) {
if r == '.' {
return lx.errorf("floats must start with a digit, not '.'")
}
return lx.errorf("expected a digit but got %q", r)
}
return lexNumber
}
// lexNumber consumes an integer or a float after seeing the first digit.
func lexNumber(lx *lexer) stateFn {
r := lx.next()
if isDigit(r) {
return lexNumber
}
switch r {
case '_':
return lexNumber
case '.', 'e', 'E':
return lexFloat
}
lx.backup()
lx.emit(itemInteger)
return lx.pop()
}
// lexFloat consumes the elements of a float. It allows any sequence of
// float-like characters, so floats emitted by the lexer are only a first
// approximation and must be validated by the parser.
func lexFloat(lx *lexer) stateFn {
r := lx.next()
if isDigit(r) {
return lexFloat
}
switch r {
case '_', '.', '-', '+', 'e', 'E':
return lexFloat
}
lx.backup()
lx.emit(itemFloat)
return lx.pop()
}
// lexBool consumes a bool string: 'true' or 'false'.
func lexBool(lx *lexer) stateFn {
var rs []rune
for {
r := lx.next()
if !unicode.IsLetter(r) {
lx.backup()
break
}
rs = append(rs, r)
}
s := string(rs)
switch s {
case "true", "false":
lx.emit(itemBool)
return lx.pop()
}
return lx.errorf("expected value but found %q instead", s)
}
// lexCommentStart begins the lexing of a comment. It will emit
// itemCommentStart and consume no characters, passing control to lexComment.
func lexCommentStart(lx *lexer) stateFn {
lx.ignore()
lx.emit(itemCommentStart)
return lexComment
}
// lexComment lexes an entire comment. It assumes that '#' has been consumed.
// It will consume *up to* the first newline character, and pass control
// back to the last state on the stack.
func lexComment(lx *lexer) stateFn {
r := lx.peek()
if isNL(r) || r == eof {
lx.emit(itemText)
return lx.pop()
}
lx.next()
return lexComment
}
// lexSkip ignores all slurped input and moves on to the next state.
func lexSkip(lx *lexer, nextState stateFn) stateFn {
return func(lx *lexer) stateFn {
lx.ignore()
return nextState
}
}
// isWhitespace returns true if `r` is a whitespace character according
// to the spec.
func isWhitespace(r rune) bool {
return r == '\t' || r == ' '
}
func isNL(r rune) bool {
return r == '\n' || r == '\r'
}
func isDigit(r rune) bool {
return r >= '0' && r <= '9'
}
func isHexadecimal(r rune) bool {
return (r >= '0' && r <= '9') ||
(r >= 'a' && r <= 'f') ||
(r >= 'A' && r <= 'F')
}
func isBareKeyChar(r rune) bool {
return (r >= 'A' && r <= 'Z') ||
(r >= 'a' && r <= 'z') ||
(r >= '0' && r <= '9') ||
r == '_' ||
r == '-'
}
func (itype itemType) String() string {
switch itype {
case itemError:
return "Error"
case itemNIL:
return "NIL"
case itemEOF:
return "EOF"
case itemText:
return "Text"
case itemString, itemRawString, itemMultilineString, itemRawMultilineString:
return "String"
case itemBool:
return "Bool"
case itemInteger:
return "Integer"
case itemFloat:
return "Float"
case itemDatetime:
return "DateTime"
case itemTableStart:
return "TableStart"
case itemTableEnd:
return "TableEnd"
case itemKeyStart:
return "KeyStart"
case itemArray:
return "Array"
case itemArrayEnd:
return "ArrayEnd"
case itemCommentStart:
return "CommentStart"
}
panic(fmt.Sprintf("BUG: Unknown type '%d'.", int(itype)))
}
func (item item) String() string {
return fmt.Sprintf("(%s, %s)", item.typ.String(), item.val)
}
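// Editor's sketch, not part of the original file: the functions above form
// a coroutine-style state machine. A minimal driver, assuming the lex()
// constructor and nextItem() method defined earlier in this file:
func exampleLexItems() {
	lx := lex("answer = 42 # the answer\n")
	for it := lx.nextItem(); it.typ != itemEOF && it.typ != itemError; it = lx.nextItem() {
		fmt.Println(it) // e.g. (KeyStart, ), (Text, answer), (Integer, 42), ...
	}
}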

@ -1,592 +0,0 @@
package toml
import (
"fmt"
"strconv"
"strings"
"time"
"unicode"
"unicode/utf8"
)
type parser struct {
mapping map[string]interface{}
types map[string]tomlType
lx *lexer
// A list of keys in the order that they appear in the TOML data.
ordered []Key
// the full key for the current hash in scope
context Key
// the base key name for everything except hashes
currentKey string
// rough approximation of line number
approxLine int
// A map of 'key.group.names' to whether they were created implicitly.
implicits map[string]bool
}
type parseError string
func (pe parseError) Error() string {
return string(pe)
}
func parse(data string) (p *parser, err error) {
defer func() {
if r := recover(); r != nil {
var ok bool
if err, ok = r.(parseError); ok {
return
}
panic(r)
}
}()
p = &parser{
mapping: make(map[string]interface{}),
types: make(map[string]tomlType),
lx: lex(data),
ordered: make([]Key, 0),
implicits: make(map[string]bool),
}
for {
item := p.next()
if item.typ == itemEOF {
break
}
p.topLevel(item)
}
return p, nil
}
func (p *parser) panicf(format string, v ...interface{}) {
msg := fmt.Sprintf("Near line %d (last key parsed '%s'): %s",
p.approxLine, p.current(), fmt.Sprintf(format, v...))
panic(parseError(msg))
}
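// Editor's note, not part of the original file: error handling in this
// parser is panic-based. panicf raises a parseError, the deferred recover
// in parse converts it back into an ordinary error return value, and any
// other panic value (a genuine bug) is re-raised unchanged.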
func (p *parser) next() item {
it := p.lx.nextItem()
if it.typ == itemError {
p.panicf("%s", it.val)
}
return it
}
func (p *parser) bug(format string, v ...interface{}) {
panic(fmt.Sprintf("BUG: "+format+"\n\n", v...))
}
func (p *parser) expect(typ itemType) item {
it := p.next()
p.assertEqual(typ, it.typ)
return it
}
func (p *parser) assertEqual(expected, got itemType) {
if expected != got {
p.bug("Expected '%s' but got '%s'.", expected, got)
}
}
func (p *parser) topLevel(item item) {
switch item.typ {
case itemCommentStart:
p.approxLine = item.line
p.expect(itemText)
case itemTableStart:
kg := p.next()
p.approxLine = kg.line
var key Key
for ; kg.typ != itemTableEnd && kg.typ != itemEOF; kg = p.next() {
key = append(key, p.keyString(kg))
}
p.assertEqual(itemTableEnd, kg.typ)
p.establishContext(key, false)
p.setType("", tomlHash)
p.ordered = append(p.ordered, key)
case itemArrayTableStart:
kg := p.next()
p.approxLine = kg.line
var key Key
for ; kg.typ != itemArrayTableEnd && kg.typ != itemEOF; kg = p.next() {
key = append(key, p.keyString(kg))
}
p.assertEqual(itemArrayTableEnd, kg.typ)
p.establishContext(key, true)
p.setType("", tomlArrayHash)
p.ordered = append(p.ordered, key)
case itemKeyStart:
kname := p.next()
p.approxLine = kname.line
p.currentKey = p.keyString(kname)
val, typ := p.value(p.next())
p.setValue(p.currentKey, val)
p.setType(p.currentKey, typ)
p.ordered = append(p.ordered, p.context.add(p.currentKey))
p.currentKey = ""
default:
p.bug("Unexpected type at top level: %s", item.typ)
}
}
// Gets a string for a key (or part of a key in a table name).
func (p *parser) keyString(it item) string {
switch it.typ {
case itemText:
return it.val
case itemString, itemMultilineString,
itemRawString, itemRawMultilineString:
s, _ := p.value(it)
return s.(string)
default:
p.bug("Unexpected key type: %s", it.typ)
panic("unreachable")
}
}
// value translates an expected value from the lexer into a Go value wrapped
// as an empty interface.
func (p *parser) value(it item) (interface{}, tomlType) {
switch it.typ {
case itemString:
return p.replaceEscapes(it.val), p.typeOfPrimitive(it)
case itemMultilineString:
trimmed := stripFirstNewline(stripEscapedWhitespace(it.val))
return p.replaceEscapes(trimmed), p.typeOfPrimitive(it)
case itemRawString:
return it.val, p.typeOfPrimitive(it)
case itemRawMultilineString:
return stripFirstNewline(it.val), p.typeOfPrimitive(it)
case itemBool:
switch it.val {
case "true":
return true, p.typeOfPrimitive(it)
case "false":
return false, p.typeOfPrimitive(it)
}
p.bug("Expected boolean value, but got '%s'.", it.val)
case itemInteger:
if !numUnderscoresOK(it.val) {
p.panicf("Invalid integer %q: underscores must be surrounded by digits",
it.val)
}
val := strings.Replace(it.val, "_", "", -1)
num, err := strconv.ParseInt(val, 10, 64)
if err != nil {
// Distinguish integer values. Normally, it'd be a bug if the lexer
// provides an invalid integer, but it's possible that the number is
// out of range of valid values (which the lexer cannot determine).
// So mark the former as a bug but the latter as a legitimate user
// error.
if e, ok := err.(*strconv.NumError); ok &&
e.Err == strconv.ErrRange {
p.panicf("Integer '%s' is out of the range of 64-bit "+
"signed integers.", it.val)
} else {
p.bug("Expected integer value, but got '%s'.", it.val)
}
}
return num, p.typeOfPrimitive(it)
case itemFloat:
parts := strings.FieldsFunc(it.val, func(r rune) bool {
switch r {
case '.', 'e', 'E':
return true
}
return false
})
for _, part := range parts {
if !numUnderscoresOK(part) {
p.panicf("Invalid float %q: underscores must be "+
"surrounded by digits", it.val)
}
}
if !numPeriodsOK(it.val) {
// As a special case, numbers like '123.' or '1.e2',
// which are valid as far as Go/strconv are concerned,
// must be rejected because TOML says that a fractional
// part consists of '.' followed by 1+ digits.
p.panicf("Invalid float %q: '.' must be followed "+
"by one or more digits", it.val)
}
val := strings.Replace(it.val, "_", "", -1)
num, err := strconv.ParseFloat(val, 64)
if err != nil {
if e, ok := err.(*strconv.NumError); ok &&
e.Err == strconv.ErrRange {
p.panicf("Float '%s' is out of the range of 64-bit "+
"IEEE-754 floating-point numbers.", it.val)
} else {
p.panicf("Invalid float value: %q", it.val)
}
}
return num, p.typeOfPrimitive(it)
case itemDatetime:
var t time.Time
var ok bool
var err error
for _, format := range []string{
"2006-01-02T15:04:05Z07:00",
"2006-01-02T15:04:05",
"2006-01-02",
} {
t, err = time.ParseInLocation(format, it.val, time.Local)
if err == nil {
ok = true
break
}
}
if !ok {
p.panicf("Invalid TOML Datetime: %q.", it.val)
}
return t, p.typeOfPrimitive(it)
case itemArray:
array := make([]interface{}, 0)
types := make([]tomlType, 0)
for it = p.next(); it.typ != itemArrayEnd; it = p.next() {
if it.typ == itemCommentStart {
p.expect(itemText)
continue
}
val, typ := p.value(it)
array = append(array, val)
types = append(types, typ)
}
return array, p.typeOfArray(types)
case itemInlineTableStart:
var (
hash = make(map[string]interface{})
outerContext = p.context
outerKey = p.currentKey
)
p.context = append(p.context, p.currentKey)
p.currentKey = ""
for it := p.next(); it.typ != itemInlineTableEnd; it = p.next() {
// Check for comments first: p.bug panics, so the comment test must
// precede the key-start assertion.
if it.typ == itemCommentStart {
p.expect(itemText)
continue
}
if it.typ != itemKeyStart {
p.bug("Expected key start but instead found %q, around line %d",
it.val, p.approxLine)
}
// retrieve key
k := p.next()
p.approxLine = k.line
kname := p.keyString(k)
// retrieve value
p.currentKey = kname
val, typ := p.value(p.next())
// make sure we keep metadata up to date
p.setType(kname, typ)
p.ordered = append(p.ordered, p.context.add(p.currentKey))
hash[kname] = val
}
p.context = outerContext
p.currentKey = outerKey
return hash, tomlHash
}
p.bug("Unexpected value type: %s", it.typ)
panic("unreachable")
}
// numUnderscoresOK checks whether each underscore in s is surrounded by
// characters that are not underscores.
func numUnderscoresOK(s string) bool {
accept := false
for _, r := range s {
if r == '_' {
if !accept {
return false
}
accept = false
continue
}
accept = true
}
return accept
}
// numPeriodsOK checks whether every period in s is followed by a digit.
func numPeriodsOK(s string) bool {
period := false
for _, r := range s {
if period && !isDigit(r) {
return false
}
period = r == '.'
}
return !period
}
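// Editor's sketch, not part of the original file, exercising the two
// validators above on typical inputs:
func exampleNumberValidation() {
	fmt.Println(numUnderscoresOK("1_000"))  // true: every '_' sits between digits
	fmt.Println(numUnderscoresOK("1000_"))  // false: trailing underscore
	fmt.Println(numUnderscoresOK("10__00")) // false: adjacent underscores
	fmt.Println(numPeriodsOK("3.14"))       // true: '.' is followed by a digit
	fmt.Println(numPeriodsOK("123."))       // false: trailing '.'
}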
// establishContext sets the current context of the parser,
// where the context is either a hash or an array of hashes. Which one is
// set depends on the value of the `array` parameter.
//
// Establishing the context also makes sure that the key isn't a duplicate, and
// will create implicit hashes automatically.
func (p *parser) establishContext(key Key, array bool) {
var ok bool
// Always start at the top level and drill down for our context.
hashContext := p.mapping
keyContext := make(Key, 0)
// We only need implicit hashes for key[0:-1]
for _, k := range key[0 : len(key)-1] {
_, ok = hashContext[k]
keyContext = append(keyContext, k)
// No key? Make an implicit hash and move on.
if !ok {
p.addImplicit(keyContext)
hashContext[k] = make(map[string]interface{})
}
// If the hash context is actually an array of tables, then set
// the hash context to the last element in that array.
//
// Otherwise, it better be a table, since this MUST be a key group (by
// virtue of it not being the last element in a key).
switch t := hashContext[k].(type) {
case []map[string]interface{}:
hashContext = t[len(t)-1]
case map[string]interface{}:
hashContext = t
default:
p.panicf("Key '%s' was already created as a hash.", keyContext)
}
}
p.context = keyContext
if array {
// If this is the first element for this array, then allocate a new
// list of tables for it.
k := key[len(key)-1]
if _, ok := hashContext[k]; !ok {
hashContext[k] = make([]map[string]interface{}, 0, 5)
}
// Add a new table. But make sure the key hasn't already been used
// for something else.
if hash, ok := hashContext[k].([]map[string]interface{}); ok {
hashContext[k] = append(hash, make(map[string]interface{}))
} else {
p.panicf("Key '%s' was already created and cannot be used as "+
"an array.", keyContext)
}
} else {
p.setValue(key[len(key)-1], make(map[string]interface{}))
}
p.context = append(p.context, key[len(key)-1])
}
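// Editor's note, not part of the original file: establishContext is what
// implements TOML's implicit tables. In
//
//	[a.b.c]
//	answer = 42
//
// the tables 'a' and 'a.b' are created implicitly and recorded in
// p.implicits, so a later explicit [a] is accepted exactly once; redefining
// a concrete key afterwards panics in setValue below.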
// setValue sets the given key to the given value in the current context.
// It will make sure that the key hasn't already been defined, accounting
// for implicit key groups.
func (p *parser) setValue(key string, value interface{}) {
var tmpHash interface{}
var ok bool
hash := p.mapping
keyContext := make(Key, 0)
for _, k := range p.context {
keyContext = append(keyContext, k)
if tmpHash, ok = hash[k]; !ok {
p.bug("Context for key '%s' has not been established.", keyContext)
}
switch t := tmpHash.(type) {
case []map[string]interface{}:
// The context is a table of hashes. Pick the most recent table
// defined as the current hash.
hash = t[len(t)-1]
case map[string]interface{}:
hash = t
default:
p.bug("Expected hash to have type 'map[string]interface{}', but "+
"it has '%T' instead.", tmpHash)
}
}
keyContext = append(keyContext, key)
if _, ok := hash[key]; ok {
// Typically, if the given key has already been set, then we have
// to raise an error since duplicate keys are disallowed. However,
// it's possible that a key was previously defined implicitly. In this
// case, it is allowed to be redefined concretely. (See the
// `tests/valid/implicit-and-explicit-after.toml` test in `toml-test`.)
//
// But we have to make sure to stop marking it as implicit, so that
// another redefinition provokes an error.
//
// Note that since it has already been defined (as a hash), we don't
// want to overwrite it. So our business is done.
if p.isImplicit(keyContext) {
p.removeImplicit(keyContext)
return
}
// Otherwise, we have a concrete key trying to override a previous
// key, which is *always* wrong.
p.panicf("Key '%s' has already been defined.", keyContext)
}
hash[key] = value
}
// setType sets the type of a particular value at a given key.
// It should be called immediately AFTER setValue.
//
// Note that if `key` is empty, then the type given will be applied to the
// current context (which is either a table or an array of tables).
func (p *parser) setType(key string, typ tomlType) {
keyContext := make(Key, 0, len(p.context)+1)
for _, k := range p.context {
keyContext = append(keyContext, k)
}
if len(key) > 0 { // allow type setting for hashes
keyContext = append(keyContext, key)
}
p.types[keyContext.String()] = typ
}
// addImplicit sets the given Key as having been created implicitly.
func (p *parser) addImplicit(key Key) {
p.implicits[key.String()] = true
}
// removeImplicit stops tagging the given key as having been implicitly
// created.
func (p *parser) removeImplicit(key Key) {
p.implicits[key.String()] = false
}
// isImplicit returns true if the key group pointed to by the key was created
// implicitly.
func (p *parser) isImplicit(key Key) bool {
return p.implicits[key.String()]
}
// current returns the full key name of the current context.
func (p *parser) current() string {
if len(p.currentKey) == 0 {
return p.context.String()
}
if len(p.context) == 0 {
return p.currentKey
}
return fmt.Sprintf("%s.%s", p.context, p.currentKey)
}
func stripFirstNewline(s string) string {
if len(s) == 0 || s[0] != '\n' {
return s
}
return s[1:]
}
func stripEscapedWhitespace(s string) string {
esc := strings.Split(s, "\\\n")
if len(esc) > 1 {
for i := 1; i < len(esc); i++ {
esc[i] = strings.TrimLeftFunc(esc[i], unicode.IsSpace)
}
}
return strings.Join(esc, "")
}
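// Editor's sketch, not part of the original file: how the two helpers above
// cooperate on a multiline basic string, mirroring the itemMultilineString
// case in value().
func exampleMultilineTrim() {
	raw := "\nfoo \\\n    bar" // lexed contents of a """...""" literal
	fmt.Println(stripFirstNewline(stripEscapedWhitespace(raw))) // "foo bar"
}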
func (p *parser) replaceEscapes(str string) string {
var replaced []rune
s := []byte(str)
r := 0
for r < len(s) {
if s[r] != '\\' {
c, size := utf8.DecodeRune(s[r:])
r += size
replaced = append(replaced, c)
continue
}
r += 1
if r >= len(s) {
p.bug("Escape sequence at end of string.")
return ""
}
switch s[r] {
default:
p.bug("Expected valid escape code after \\, but got %q.", s[r])
return ""
case 'b':
replaced = append(replaced, rune(0x0008))
r += 1
case 't':
replaced = append(replaced, rune(0x0009))
r += 1
case 'n':
replaced = append(replaced, rune(0x000A))
r += 1
case 'f':
replaced = append(replaced, rune(0x000C))
r += 1
case 'r':
replaced = append(replaced, rune(0x000D))
r += 1
case '"':
replaced = append(replaced, rune(0x0022))
r += 1
case '\\':
replaced = append(replaced, rune(0x005C))
r += 1
case 'u':
// At this point, we know we have a Unicode escape of the form
// `uXXXX` at [r, r+5). (Because the lexer guarantees this
// for us.)
escaped := p.asciiEscapeToUnicode(s[r+1 : r+5])
replaced = append(replaced, escaped)
r += 5
case 'U':
// At this point, we know we have a Unicode escape of the form
// `UXXXXXXXX` at [r, r+9). (Because the lexer guarantees this
// for us.)
escaped := p.asciiEscapeToUnicode(s[r+1 : r+9])
replaced = append(replaced, escaped)
r += 9
}
}
return string(replaced)
}
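// Editor's sketch, not part of the original file: replaceEscapes decodes the
// backslash sequences that the lexer has already validated.
func exampleReplaceEscapes(p *parser) {
	s := p.replaceEscapes(`col1\tcol2 \u00E9`)
	fmt.Printf("%q\n", s) // "col1\tcol2 é"
}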
func (p *parser) asciiEscapeToUnicode(bs []byte) rune {
s := string(bs)
hex, err := strconv.ParseUint(strings.ToLower(s), 16, 32)
if err != nil {
p.bug("Could not parse '%s' as a hexadecimal number, but the "+
"lexer claims it's OK: %s", s, err)
}
if !utf8.ValidRune(rune(hex)) {
p.panicf("Escaped character '\\u%s' is not valid UTF-8.", s)
}
return rune(hex)
}
func isStringType(ty itemType) bool {
return ty == itemString || ty == itemMultilineString ||
ty == itemRawString || ty == itemRawMultilineString
}

@ -1 +0,0 @@
au BufWritePost *.go silent!make tags > /dev/null 2>&1

@ -1,91 +0,0 @@
package toml
// tomlType represents any Go type that corresponds to a TOML type.
// While the first draft of the TOML spec has a simplistic type system that
// probably doesn't need this level of sophistication, the spec seems to be
// moving toward adding real composite types.
type tomlType interface {
typeString() string
}
// typeEqual accepts any two types and returns true if they are equal.
func typeEqual(t1, t2 tomlType) bool {
if t1 == nil || t2 == nil {
return false
}
return t1.typeString() == t2.typeString()
}
func typeIsHash(t tomlType) bool {
return typeEqual(t, tomlHash) || typeEqual(t, tomlArrayHash)
}
type tomlBaseType string
func (btype tomlBaseType) typeString() string {
return string(btype)
}
func (btype tomlBaseType) String() string {
return btype.typeString()
}
var (
tomlInteger tomlBaseType = "Integer"
tomlFloat tomlBaseType = "Float"
tomlDatetime tomlBaseType = "Datetime"
tomlString tomlBaseType = "String"
tomlBool tomlBaseType = "Bool"
tomlArray tomlBaseType = "Array"
tomlHash tomlBaseType = "Hash"
tomlArrayHash tomlBaseType = "ArrayHash"
)
// typeOfPrimitive returns a tomlType of any primitive value in TOML.
// Primitive values are: Integer, Float, Datetime, String and Bool.
//
// Passing a lexer item other than the following will cause a BUG message
// to occur: itemString, itemBool, itemInteger, itemFloat, itemDatetime.
func (p *parser) typeOfPrimitive(lexItem item) tomlType {
switch lexItem.typ {
case itemInteger:
return tomlInteger
case itemFloat:
return tomlFloat
case itemDatetime:
return tomlDatetime
case itemString:
return tomlString
case itemMultilineString:
return tomlString
case itemRawString:
return tomlString
case itemRawMultilineString:
return tomlString
case itemBool:
return tomlBool
}
p.bug("Cannot infer primitive type of lex item '%s'.", lexItem)
panic("unreachable")
}
// typeOfArray returns a tomlType for an array given a list of types of its
// values.
//
// In the current spec, if an array is homogeneous, then its type is always
// "Array". If the array is not homogeneous, an error is generated.
func (p *parser) typeOfArray(types []tomlType) tomlType {
// Empty arrays are cool.
if len(types) == 0 {
return tomlArray
}
theType := types[0]
for _, t := range types[1:] {
if !typeEqual(theType, t) {
p.panicf("Array contains values of type '%s' and '%s', but "+
"arrays must be homogeneous.", theType, t)
}
}
return tomlArray
}
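// Editor's note, not part of the original file: under the pre-1.0 TOML rules
// enforced here, [1, 2, 3] yields tomlArray, while a mixed array such as
// [1, "two"] panics with the homogeneity error above.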

@ -1,242 +0,0 @@
package toml
// Struct field handling is adapted from code in encoding/json:
//
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the Go distribution.
import (
"reflect"
"sort"
"sync"
)
// A field represents a single field found in a struct.
type field struct {
name string // the name of the field (`toml` tag included)
tag bool // whether field has a `toml` tag
index []int // represents the depth of an anonymous field
typ reflect.Type // the type of the field
}
// byName sorts fields by name, breaking ties with depth,
// then breaking ties with "name came from toml tag", then
// breaking ties with index sequence.
type byName []field
func (x byName) Len() int { return len(x) }
func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
func (x byName) Less(i, j int) bool {
if x[i].name != x[j].name {
return x[i].name < x[j].name
}
if len(x[i].index) != len(x[j].index) {
return len(x[i].index) < len(x[j].index)
}
if x[i].tag != x[j].tag {
return x[i].tag
}
return byIndex(x).Less(i, j)
}
// byIndex sorts fields by index sequence.
type byIndex []field
func (x byIndex) Len() int { return len(x) }
func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
func (x byIndex) Less(i, j int) bool {
for k, xik := range x[i].index {
if k >= len(x[j].index) {
return false
}
if xik != x[j].index[k] {
return xik < x[j].index[k]
}
}
return len(x[i].index) < len(x[j].index)
}
// typeFields returns a list of fields that TOML should recognize for the given
// type. The algorithm is breadth-first search over the set of structs to
// include - the top struct and then any reachable anonymous structs.
func typeFields(t reflect.Type) []field {
// Anonymous fields to explore at the current level and the next.
current := []field{}
next := []field{{typ: t}}
// Count of queued names for current level and the next.
count := map[reflect.Type]int{}
nextCount := map[reflect.Type]int{}
// Types already visited at an earlier level.
visited := map[reflect.Type]bool{}
// Fields found.
var fields []field
for len(next) > 0 {
current, next = next, current[:0]
count, nextCount = nextCount, map[reflect.Type]int{}
for _, f := range current {
if visited[f.typ] {
continue
}
visited[f.typ] = true
// Scan f.typ for fields to include.
for i := 0; i < f.typ.NumField(); i++ {
sf := f.typ.Field(i)
if sf.PkgPath != "" && !sf.Anonymous { // unexported
continue
}
opts := getOptions(sf.Tag)
if opts.skip {
continue
}
index := make([]int, len(f.index)+1)
copy(index, f.index)
index[len(f.index)] = i
ft := sf.Type
if ft.Name() == "" && ft.Kind() == reflect.Ptr {
// Follow pointer.
ft = ft.Elem()
}
// Record found field and index sequence.
if opts.name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct {
tagged := opts.name != ""
name := opts.name
if name == "" {
name = sf.Name
}
fields = append(fields, field{name, tagged, index, ft})
if count[f.typ] > 1 {
// If there were multiple instances, add a second,
// so that the annihilation code will see a duplicate.
// It only cares about the distinction between 1 or 2,
// so don't bother generating any more copies.
fields = append(fields, fields[len(fields)-1])
}
continue
}
// Record new anonymous struct to explore in next round.
nextCount[ft]++
if nextCount[ft] == 1 {
f := field{name: ft.Name(), index: index, typ: ft}
next = append(next, f)
}
}
}
}
sort.Sort(byName(fields))
// Delete all fields that are hidden by the Go rules for embedded fields,
// except that fields with TOML tags are promoted.
// The fields are sorted in primary order of name, secondary order
// of field index length. Loop over names; for each name, delete
// hidden fields by choosing the one dominant field that survives.
out := fields[:0]
for advance, i := 0, 0; i < len(fields); i += advance {
// One iteration per name.
// Find the sequence of fields with the name of this first field.
fi := fields[i]
name := fi.name
for advance = 1; i+advance < len(fields); advance++ {
fj := fields[i+advance]
if fj.name != name {
break
}
}
if advance == 1 { // Only one field with this name
out = append(out, fi)
continue
}
dominant, ok := dominantField(fields[i : i+advance])
if ok {
out = append(out, dominant)
}
}
fields = out
sort.Sort(byIndex(fields))
return fields
}
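// Editor's sketch, not part of the original file: given
//
//	type Base struct{ ID int }
//	type T struct {
//		Base      // contributes "ID" at depth 1
//		ID string // depth 0
//	}
//
// typeFields(reflect.TypeOf(T{})) keeps only the depth-0 ID: dominantField
// below drops Base.ID because the shallower field of the same name wins,
// exactly as in Go's own shadowing rules for embedded fields.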
// dominantField looks through the fields, all of which are known to
// have the same name, to find the single field that dominates the
// others using Go's embedding rules, modified by the presence of
// TOML tags. If there are multiple top-level fields, the boolean
// will be false: This condition is an error in Go and we skip all
// the fields.
func dominantField(fields []field) (field, bool) {
// The fields are sorted in increasing index-length order. The winner
// must therefore be one with the shortest index length. Drop all
// longer entries, which is easy: just truncate the slice.
length := len(fields[0].index)
tagged := -1 // Index of first tagged field.
for i, f := range fields {
if len(f.index) > length {
fields = fields[:i]
break
}
if f.tag {
if tagged >= 0 {
// Multiple tagged fields at the same level: conflict.
// Return no field.
return field{}, false
}
tagged = i
}
}
if tagged >= 0 {
return fields[tagged], true
}
// All remaining fields have the same length. If there's more than one,
// we have a conflict (two fields named "X" at the same level) and we
// return no field.
if len(fields) > 1 {
return field{}, false
}
return fields[0], true
}
var fieldCache struct {
sync.RWMutex
m map[reflect.Type][]field
}
// cachedTypeFields is like typeFields but uses a cache to avoid repeated work.
func cachedTypeFields(t reflect.Type) []field {
fieldCache.RLock()
f := fieldCache.m[t]
fieldCache.RUnlock()
if f != nil {
return f
}
// Compute fields without lock.
// Might duplicate effort but won't hold other computations back.
f = typeFields(t)
if f == nil {
f = []field{}
}
fieldCache.Lock()
if fieldCache.m == nil {
fieldCache.m = map[reflect.Type][]field{}
}
fieldCache.m[t] = f
fieldCache.Unlock()
return f
}
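// Editor's note, not part of the original file: like its encoding/json
// ancestor, the cache computes typeFields outside the write lock. Two
// goroutines may both compute the fields for the same type, but the result
// is deterministic, so the duplicated work is harmless.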

vendor/golang.org/x/tools/AUTHORS generated vendored
@ -1,3 +0,0 @@
# This source code refers to The Go Authors for copyright purposes.
# The master list of authors is in the main Go distribution,
# visible at http://tip.golang.org/AUTHORS.

@ -1,3 +0,0 @@
# This source code was written by the Go contributors.
# The master list of contributors is in the main Go distribution,
# visible at http://tip.golang.org/CONTRIBUTORS.

vendor/golang.org/x/tools/LICENSE generated vendored
@ -1,27 +0,0 @@
Copyright (c) 2009 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

vendor/golang.org/x/tools/PATENTS generated vendored
@ -1,22 +0,0 @@
Additional IP Rights Grant (Patents)
"This implementation" means the copyrightable works distributed by
Google as part of the Go project.
Google hereby grants to You a perpetual, worldwide, non-exclusive,
no-charge, royalty-free, irrevocable (except as stated in this section)
patent license to make, have made, use, offer to sell, sell, import,
transfer and otherwise run, modify and propagate the contents of this
implementation of Go, where such license applies only to those patent
claims, both currently owned or controlled by Google and acquired in
the future, licensable by Google that are necessarily infringed by this
implementation of Go. This grant does not include claims that would be
infringed only as a consequence of further modification of this
implementation. If you or your agent or exclusive licensee institute or
order or agree to the institution of patent litigation against any
entity (including a cross-claim or counterclaim in a lawsuit) alleging
that this implementation of Go or any code incorporated within this
implementation of Go constitutes direct or contributory patent
infringement, or inducement of patent infringement, then any patent
rights granted to you under this License for this implementation of Go
shall terminate as of the date such litigation is filed.

@ -1,627 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package astutil
// This file defines utilities for working with source positions.
import (
"fmt"
"go/ast"
"go/token"
"sort"
)
// PathEnclosingInterval returns the node that encloses the source
// interval [start, end), and all its ancestors up to the AST root.
//
// The definition of "enclosing" used by this function considers
// additional whitespace abutting a node to be enclosed by it.
// In this example:
//
// z := x + y // add them
// <-A->
// <----B----->
//
// the ast.BinaryExpr(+) node is considered to enclose interval B
// even though its [Pos()..End()) is actually only interval A.
// This behaviour makes user interfaces more tolerant of imperfect
// input.
//
// This function treats tokens as nodes, though they are not included
// in the result. e.g. PathEnclosingInterval("+") returns the
// enclosing ast.BinaryExpr("x + y").
//
// If start==end, the 1-char interval following start is used instead.
//
// The 'exact' result is true if the interval contains only path[0]
// and perhaps some adjacent whitespace. It is false if the interval
// overlaps multiple children of path[0], or if it contains only
// interior whitespace of path[0].
// In this example:
//
// z := x + y // add them
// <--C--> <---E-->
// ^
// D
//
// intervals C, D and E are inexact. C is contained by the
// z-assignment statement, because it spans three of its children (:=,
// x, +). So too is the 1-char interval D, because it contains only
// interior whitespace of the assignment. E is considered interior
// whitespace of the BlockStmt containing the assignment.
//
// Precondition: [start, end) both lie within the same file as root.
// TODO(adonovan): return (nil, false) in this case and remove precond.
// Requires FileSet; see loader.tokenFileContainsPos.
//
// Postcondition: path is never nil; it always contains at least 'root'.
//
func PathEnclosingInterval(root *ast.File, start, end token.Pos) (path []ast.Node, exact bool) {
// fmt.Printf("EnclosingInterval %d %d\n", start, end) // debugging
// Precondition: node.[Pos..End) and adjoining whitespace contain [start, end).
var visit func(node ast.Node) bool
visit = func(node ast.Node) bool {
path = append(path, node)
nodePos := node.Pos()
nodeEnd := node.End()
// fmt.Printf("visit(%T, %d, %d)\n", node, nodePos, nodeEnd) // debugging
// Intersect [start, end) with interval of node.
if start < nodePos {
start = nodePos
}
if end > nodeEnd {
end = nodeEnd
}
// Find sole child that contains [start, end).
children := childrenOf(node)
l := len(children)
for i, child := range children {
// [childPos, childEnd) is unaugmented interval of child.
childPos := child.Pos()
childEnd := child.End()
// [augPos, augEnd) is whitespace-augmented interval of child.
augPos := childPos
augEnd := childEnd
if i > 0 {
augPos = children[i-1].End() // start of preceding whitespace
}
if i < l-1 {
nextChildPos := children[i+1].Pos()
// Does [start, end) lie between child and next child?
if start >= augEnd && end <= nextChildPos {
return false // inexact match
}
augEnd = nextChildPos // end of following whitespace
}
// fmt.Printf("\tchild %d: [%d..%d)\tcontains interval [%d..%d)?\n",
// i, augPos, augEnd, start, end) // debugging
// Does augmented child strictly contain [start, end)?
if augPos <= start && end <= augEnd {
_, isToken := child.(tokenNode)
return isToken || visit(child)
}
// Does [start, end) overlap multiple children?
// i.e. left-augmented child contains start
// but LR-augmented child does not contain end.
if start < childEnd && end > augEnd {
break
}
}
// No single child contained [start, end),
// so node is the result. Is it exact?
// (It's tempting to put this condition before the
// child loop, but it gives the wrong result in the
// case where a node (e.g. ExprStmt) and its sole
// child have equal intervals.)
if start == nodePos && end == nodeEnd {
return true // exact match
}
return false // inexact: overlaps multiple children
}
if start > end {
start, end = end, start
}
if start < root.End() && end > root.Pos() {
if start == end {
end = start + 1 // empty interval => interval of size 1
}
exact = visit(root)
// Reverse the path:
for i, l := 0, len(path); i < l/2; i++ {
path[i], path[l-1-i] = path[l-1-i], path[i]
}
} else {
// Selection lies within whitespace preceding the
// first (or following the last) declaration in the file.
// The result nonetheless always includes the ast.File.
path = append(path, root)
}
return
}
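// Editor's sketch, not part of the original file: resolving a byte offset in
// a parsed file to the innermost enclosing node.
func examplePathEnclosing(fset *token.FileSet, f *ast.File, offset int) {
	pos := fset.File(f.Pos()).Pos(offset) // token.Pos for the byte offset
	path, exact := PathEnclosingInterval(f, pos, pos)
	fmt.Printf("%s (exact=%v)\n", NodeDescription(path[0]), exact)
}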
// tokenNode is a dummy implementation of ast.Node for a single token.
// tokenNodes are used transiently by PathEnclosingInterval but never escape
// this package.
//
type tokenNode struct {
pos token.Pos
end token.Pos
}
func (n tokenNode) Pos() token.Pos {
return n.pos
}
func (n tokenNode) End() token.Pos {
return n.end
}
func tok(pos token.Pos, len int) ast.Node {
return tokenNode{pos, pos + token.Pos(len)}
}
// childrenOf returns the direct non-nil children of ast.Node n.
// It may include fake ast.Node implementations for bare tokens.
// It is not safe to call (e.g.) ast.Walk on such nodes.
//
func childrenOf(n ast.Node) []ast.Node {
var children []ast.Node
// First add nodes for all true subtrees.
ast.Inspect(n, func(node ast.Node) bool {
if node == n { // push n
return true // recur
}
if node != nil { // push child
children = append(children, node)
}
return false // no recursion
})
// Then add fake Nodes for bare tokens.
switch n := n.(type) {
case *ast.ArrayType:
children = append(children,
tok(n.Lbrack, len("[")),
tok(n.Elt.End(), len("]")))
case *ast.AssignStmt:
children = append(children,
tok(n.TokPos, len(n.Tok.String())))
case *ast.BasicLit:
children = append(children,
tok(n.ValuePos, len(n.Value)))
case *ast.BinaryExpr:
children = append(children, tok(n.OpPos, len(n.Op.String())))
case *ast.BlockStmt:
children = append(children,
tok(n.Lbrace, len("{")),
tok(n.Rbrace, len("}")))
case *ast.BranchStmt:
children = append(children,
tok(n.TokPos, len(n.Tok.String())))
case *ast.CallExpr:
children = append(children,
tok(n.Lparen, len("(")),
tok(n.Rparen, len(")")))
if n.Ellipsis != 0 {
children = append(children, tok(n.Ellipsis, len("...")))
}
case *ast.CaseClause:
if n.List == nil {
children = append(children,
tok(n.Case, len("default")))
} else {
children = append(children,
tok(n.Case, len("case")))
}
children = append(children, tok(n.Colon, len(":")))
case *ast.ChanType:
switch n.Dir {
case ast.RECV:
children = append(children, tok(n.Begin, len("<-chan")))
case ast.SEND:
children = append(children, tok(n.Begin, len("chan<-")))
case ast.RECV | ast.SEND:
children = append(children, tok(n.Begin, len("chan")))
}
case *ast.CommClause:
if n.Comm == nil {
children = append(children,
tok(n.Case, len("default")))
} else {
children = append(children,
tok(n.Case, len("case")))
}
children = append(children, tok(n.Colon, len(":")))
case *ast.Comment:
// nop
case *ast.CommentGroup:
// nop
case *ast.CompositeLit:
children = append(children,
tok(n.Lbrace, len("{")),
tok(n.Rbrace, len("}")))
case *ast.DeclStmt:
// nop
case *ast.DeferStmt:
children = append(children,
tok(n.Defer, len("defer")))
case *ast.Ellipsis:
children = append(children,
tok(n.Ellipsis, len("...")))
case *ast.EmptyStmt:
// nop
case *ast.ExprStmt:
// nop
case *ast.Field:
// TODO(adonovan): Field.{Doc,Comment,Tag}?
case *ast.FieldList:
children = append(children,
tok(n.Opening, len("(")),
tok(n.Closing, len(")")))
case *ast.File:
// TODO test: Doc
children = append(children,
tok(n.Package, len("package")))
case *ast.ForStmt:
children = append(children,
tok(n.For, len("for")))
case *ast.FuncDecl:
// TODO(adonovan): FuncDecl.Comment?
// Uniquely, FuncDecl breaks the invariant that
// preorder traversal yields tokens in lexical order:
// in fact, FuncDecl.Recv precedes FuncDecl.Type.Func.
//
// As a workaround, we inline the case for FuncType
// here and order things correctly.
//
children = nil // discard ast.Walk(FuncDecl) info subtrees
children = append(children, tok(n.Type.Func, len("func")))
if n.Recv != nil {
children = append(children, n.Recv)
}
children = append(children, n.Name)
if n.Type.Params != nil {
children = append(children, n.Type.Params)
}
if n.Type.Results != nil {
children = append(children, n.Type.Results)
}
if n.Body != nil {
children = append(children, n.Body)
}
case *ast.FuncLit:
// nop
case *ast.FuncType:
if n.Func != 0 {
children = append(children,
tok(n.Func, len("func")))
}
case *ast.GenDecl:
children = append(children,
tok(n.TokPos, len(n.Tok.String())))
if n.Lparen != 0 {
children = append(children,
tok(n.Lparen, len("(")),
tok(n.Rparen, len(")")))
}
case *ast.GoStmt:
children = append(children,
tok(n.Go, len("go")))
case *ast.Ident:
children = append(children,
tok(n.NamePos, len(n.Name)))
case *ast.IfStmt:
children = append(children,
tok(n.If, len("if")))
case *ast.ImportSpec:
// TODO(adonovan): ImportSpec.{Doc,EndPos}?
case *ast.IncDecStmt:
children = append(children,
tok(n.TokPos, len(n.Tok.String())))
case *ast.IndexExpr:
children = append(children,
tok(n.Lbrack, len("[")),
tok(n.Rbrack, len("]")))
case *ast.InterfaceType:
children = append(children,
tok(n.Interface, len("interface")))
case *ast.KeyValueExpr:
children = append(children,
tok(n.Colon, len(":")))
case *ast.LabeledStmt:
children = append(children,
tok(n.Colon, len(":")))
case *ast.MapType:
children = append(children,
tok(n.Map, len("map")))
case *ast.ParenExpr:
children = append(children,
tok(n.Lparen, len("(")),
tok(n.Rparen, len(")")))
case *ast.RangeStmt:
children = append(children,
tok(n.For, len("for")),
tok(n.TokPos, len(n.Tok.String())))
case *ast.ReturnStmt:
children = append(children,
tok(n.Return, len("return")))
case *ast.SelectStmt:
children = append(children,
tok(n.Select, len("select")))
case *ast.SelectorExpr:
// nop
case *ast.SendStmt:
children = append(children,
tok(n.Arrow, len("<-")))
case *ast.SliceExpr:
children = append(children,
tok(n.Lbrack, len("[")),
tok(n.Rbrack, len("]")))
case *ast.StarExpr:
children = append(children, tok(n.Star, len("*")))
case *ast.StructType:
children = append(children, tok(n.Struct, len("struct")))
case *ast.SwitchStmt:
children = append(children, tok(n.Switch, len("switch")))
case *ast.TypeAssertExpr:
children = append(children,
tok(n.Lparen-1, len(".")),
tok(n.Lparen, len("(")),
tok(n.Rparen, len(")")))
case *ast.TypeSpec:
// TODO(adonovan): TypeSpec.{Doc,Comment}?
case *ast.TypeSwitchStmt:
children = append(children, tok(n.Switch, len("switch")))
case *ast.UnaryExpr:
children = append(children, tok(n.OpPos, len(n.Op.String())))
case *ast.ValueSpec:
// TODO(adonovan): ValueSpec.{Doc,Comment}?
case *ast.BadDecl, *ast.BadExpr, *ast.BadStmt:
// nop
}
// TODO(adonovan): opt: merge the logic of ast.Inspect() into
// the switch above so we can make interleaved callbacks for
// both Nodes and Tokens in the right order and avoid the need
// to sort.
sort.Sort(byPos(children))
return children
}
type byPos []ast.Node
func (sl byPos) Len() int {
return len(sl)
}
func (sl byPos) Less(i, j int) bool {
return sl[i].Pos() < sl[j].Pos()
}
func (sl byPos) Swap(i, j int) {
sl[i], sl[j] = sl[j], sl[i]
}
// NodeDescription returns a description of the concrete type of n suitable
// for a user interface.
//
// TODO(adonovan): in some cases (e.g. Field, FieldList, Ident,
// StarExpr) we could be much more specific given the path to the AST
// root. Perhaps we should do that.
//
func NodeDescription(n ast.Node) string {
switch n := n.(type) {
case *ast.ArrayType:
return "array type"
case *ast.AssignStmt:
return "assignment"
case *ast.BadDecl:
return "bad declaration"
case *ast.BadExpr:
return "bad expression"
case *ast.BadStmt:
return "bad statement"
case *ast.BasicLit:
return "basic literal"
case *ast.BinaryExpr:
return fmt.Sprintf("binary %s operation", n.Op)
case *ast.BlockStmt:
return "block"
case *ast.BranchStmt:
switch n.Tok {
case token.BREAK:
return "break statement"
case token.CONTINUE:
return "continue statement"
case token.GOTO:
return "goto statement"
case token.FALLTHROUGH:
return "fall-through statement"
}
case *ast.CallExpr:
if len(n.Args) == 1 && !n.Ellipsis.IsValid() {
return "function call (or conversion)"
}
return "function call"
case *ast.CaseClause:
return "case clause"
case *ast.ChanType:
return "channel type"
case *ast.CommClause:
return "communication clause"
case *ast.Comment:
return "comment"
case *ast.CommentGroup:
return "comment group"
case *ast.CompositeLit:
return "composite literal"
case *ast.DeclStmt:
return NodeDescription(n.Decl) + " statement"
case *ast.DeferStmt:
return "defer statement"
case *ast.Ellipsis:
return "ellipsis"
case *ast.EmptyStmt:
return "empty statement"
case *ast.ExprStmt:
return "expression statement"
case *ast.Field:
// Can be any of these:
// struct {x, y int} -- struct field(s)
// struct {T} -- anon struct field
// interface {I} -- interface embedding
// interface {f()} -- interface method
// func (A) func(B) C -- receiver, param(s), result(s)
return "field/method/parameter"
case *ast.FieldList:
return "field/method/parameter list"
case *ast.File:
return "source file"
case *ast.ForStmt:
return "for loop"
case *ast.FuncDecl:
return "function declaration"
case *ast.FuncLit:
return "function literal"
case *ast.FuncType:
return "function type"
case *ast.GenDecl:
switch n.Tok {
case token.IMPORT:
return "import declaration"
case token.CONST:
return "constant declaration"
case token.TYPE:
return "type declaration"
case token.VAR:
return "variable declaration"
}
case *ast.GoStmt:
return "go statement"
case *ast.Ident:
return "identifier"
case *ast.IfStmt:
return "if statement"
case *ast.ImportSpec:
return "import specification"
case *ast.IncDecStmt:
if n.Tok == token.INC {
return "increment statement"
}
return "decrement statement"
case *ast.IndexExpr:
return "index expression"
case *ast.InterfaceType:
return "interface type"
case *ast.KeyValueExpr:
return "key/value association"
case *ast.LabeledStmt:
return "statement label"
case *ast.MapType:
return "map type"
case *ast.Package:
return "package"
case *ast.ParenExpr:
return "parenthesized " + NodeDescription(n.X)
case *ast.RangeStmt:
return "range loop"
case *ast.ReturnStmt:
return "return statement"
case *ast.SelectStmt:
return "select statement"
case *ast.SelectorExpr:
return "selector"
case *ast.SendStmt:
return "channel send"
case *ast.SliceExpr:
return "slice expression"
case *ast.StarExpr:
return "*-operation" // load/store expr or pointer type
case *ast.StructType:
return "struct type"
case *ast.SwitchStmt:
return "switch statement"
case *ast.TypeAssertExpr:
return "type assertion"
case *ast.TypeSpec:
return "type specification"
case *ast.TypeSwitchStmt:
return "type switch"
case *ast.UnaryExpr:
return fmt.Sprintf("unary %s operation", n.Op)
case *ast.ValueSpec:
return "value specification"
}
panic(fmt.Sprintf("unexpected node type: %T", n))
}

@ -1,471 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package astutil contains common utilities for working with the Go AST.
package astutil // import "golang.org/x/tools/go/ast/astutil"
import (
"fmt"
"go/ast"
"go/token"
"strconv"
"strings"
)
// AddImport adds the import path to the file f, if absent.
func AddImport(fset *token.FileSet, f *ast.File, ipath string) (added bool) {
return AddNamedImport(fset, f, "", ipath)
}
// AddNamedImport adds the import path to the file f, if absent.
// If name is not empty, it is used to rename the import.
//
// For example, calling
// AddNamedImport(fset, f, "pathpkg", "path")
// adds
// import pathpkg "path"
func AddNamedImport(fset *token.FileSet, f *ast.File, name, ipath string) (added bool) {
if imports(f, ipath) {
return false
}
newImport := &ast.ImportSpec{
Path: &ast.BasicLit{
Kind: token.STRING,
Value: strconv.Quote(ipath),
},
}
if name != "" {
newImport.Name = &ast.Ident{Name: name}
}
// Find an import decl to add to.
// The goal is to find an existing import
// whose import path has the longest shared
// prefix with ipath.
var (
bestMatch = -1 // length of longest shared prefix
lastImport = -1 // index in f.Decls of the file's final import decl
impDecl *ast.GenDecl // import decl containing the best match
impIndex = -1 // spec index in impDecl containing the best match
isThirdPartyPath = isThirdParty(ipath)
)
for i, decl := range f.Decls {
gen, ok := decl.(*ast.GenDecl)
if ok && gen.Tok == token.IMPORT {
lastImport = i
// Do not add to import "C", to avoid disrupting the
// association with its doc comment, breaking cgo.
if declImports(gen, "C") {
continue
}
// Match an empty import decl if that's all that is available.
if len(gen.Specs) == 0 && bestMatch == -1 {
impDecl = gen
}
// Compute longest shared prefix with imports in this group and find best
// matched import spec.
// 1. Always prefer import spec with longest shared prefix.
// 2. While match length is 0,
// - for stdlib package: prefer first import spec.
// - for third party package: prefer first third party import spec.
// We cannot use last import spec as best match for third party package
// because grouped imports are usually placed last by goimports -local
// flag.
// See issue #19190.
seenAnyThirdParty := false
for j, spec := range gen.Specs {
impspec := spec.(*ast.ImportSpec)
p := importPath(impspec)
n := matchLen(p, ipath)
if n > bestMatch || (bestMatch == 0 && !seenAnyThirdParty && isThirdPartyPath) {
bestMatch = n
impDecl = gen
impIndex = j
}
seenAnyThirdParty = seenAnyThirdParty || isThirdParty(p)
}
}
}
// If no import decl found, add one after the last import.
if impDecl == nil {
impDecl = &ast.GenDecl{
Tok: token.IMPORT,
}
if lastImport >= 0 {
impDecl.TokPos = f.Decls[lastImport].End()
} else {
// There are no existing imports.
// Our new import, preceded by a blank line, goes after the package declaration
// and after the comment, if any, that starts on the same line as the
// package declaration.
impDecl.TokPos = f.Package
file := fset.File(f.Package)
pkgLine := file.Line(f.Package)
for _, c := range f.Comments {
if file.Line(c.Pos()) > pkgLine {
break
}
// +2 for a blank line
impDecl.TokPos = c.End() + 2
}
}
f.Decls = append(f.Decls, nil)
copy(f.Decls[lastImport+2:], f.Decls[lastImport+1:])
f.Decls[lastImport+1] = impDecl
}
// Insert new import at insertAt.
insertAt := 0
if impIndex >= 0 {
// insert after the found import
insertAt = impIndex + 1
}
impDecl.Specs = append(impDecl.Specs, nil)
copy(impDecl.Specs[insertAt+1:], impDecl.Specs[insertAt:])
impDecl.Specs[insertAt] = newImport
pos := impDecl.Pos()
if insertAt > 0 {
// If there is a comment after an existing import, preserve the comment
// position by adding the new import after the comment.
if spec, ok := impDecl.Specs[insertAt-1].(*ast.ImportSpec); ok && spec.Comment != nil {
pos = spec.Comment.End()
} else {
// Assign same position as the previous import,
// so that the sorter sees it as being in the same block.
pos = impDecl.Specs[insertAt-1].Pos()
}
}
if newImport.Name != nil {
newImport.Name.NamePos = pos
}
newImport.Path.ValuePos = pos
newImport.EndPos = pos
// Clean up parens. impDecl contains at least one spec.
if len(impDecl.Specs) == 1 {
// Remove unneeded parens.
impDecl.Lparen = token.NoPos
} else if !impDecl.Lparen.IsValid() {
// impDecl needs parens added.
impDecl.Lparen = impDecl.Specs[0].Pos()
}
f.Imports = append(f.Imports, newImport)
if len(f.Decls) <= 1 {
return true
}
// Merge all the import declarations into the first one.
var first *ast.GenDecl
for i := 0; i < len(f.Decls); i++ {
decl := f.Decls[i]
gen, ok := decl.(*ast.GenDecl)
if !ok || gen.Tok != token.IMPORT || declImports(gen, "C") {
continue
}
if first == nil {
first = gen
continue // Don't touch the first one.
}
// We now know there is more than one package in this import
// declaration. Ensure that it ends up parenthesized.
first.Lparen = first.Pos()
// Move the imports of the other import declaration to the first one.
for _, spec := range gen.Specs {
spec.(*ast.ImportSpec).Path.ValuePos = first.Pos()
first.Specs = append(first.Specs, spec)
}
f.Decls = append(f.Decls[:i], f.Decls[i+1:]...)
i--
}
return true
}
func isThirdParty(importPath string) bool {
// Third party package import path usually contains "." (".com", ".org", ...)
// This logic is taken from golang.org/x/tools/imports package.
return strings.Contains(importPath, ".")
}
// DeleteImport deletes the import path from the file f, if present.
func DeleteImport(fset *token.FileSet, f *ast.File, path string) (deleted bool) {
return DeleteNamedImport(fset, f, "", path)
}
// DeleteNamedImport deletes the import with the given name and path from the file f, if present.
func DeleteNamedImport(fset *token.FileSet, f *ast.File, name, path string) (deleted bool) {
var delspecs []*ast.ImportSpec
var delcomments []*ast.CommentGroup
// Find the import nodes that import path, if any.
for i := 0; i < len(f.Decls); i++ {
decl := f.Decls[i]
gen, ok := decl.(*ast.GenDecl)
if !ok || gen.Tok != token.IMPORT {
continue
}
for j := 0; j < len(gen.Specs); j++ {
spec := gen.Specs[j]
impspec := spec.(*ast.ImportSpec)
if impspec.Name == nil && name != "" {
continue
}
if impspec.Name != nil && impspec.Name.Name != name {
continue
}
if importPath(impspec) != path {
continue
}
// We found an import spec that imports path.
// Delete it.
delspecs = append(delspecs, impspec)
deleted = true
copy(gen.Specs[j:], gen.Specs[j+1:])
gen.Specs = gen.Specs[:len(gen.Specs)-1]
// If this was the last import spec in this decl,
// delete the decl, too.
if len(gen.Specs) == 0 {
copy(f.Decls[i:], f.Decls[i+1:])
f.Decls = f.Decls[:len(f.Decls)-1]
i--
break
} else if len(gen.Specs) == 1 {
if impspec.Doc != nil {
delcomments = append(delcomments, impspec.Doc)
}
if impspec.Comment != nil {
delcomments = append(delcomments, impspec.Comment)
}
for _, cg := range f.Comments {
// Found comment on the same line as the import spec.
if cg.End() < impspec.Pos() && fset.Position(cg.End()).Line == fset.Position(impspec.Pos()).Line {
delcomments = append(delcomments, cg)
break
}
}
spec := gen.Specs[0].(*ast.ImportSpec)
// Move the documentation right after the import decl.
if spec.Doc != nil {
for fset.Position(gen.TokPos).Line+1 < fset.Position(spec.Doc.Pos()).Line {
fset.File(gen.TokPos).MergeLine(fset.Position(gen.TokPos).Line)
}
}
for _, cg := range f.Comments {
if cg.End() < spec.Pos() && fset.Position(cg.End()).Line == fset.Position(spec.Pos()).Line {
for fset.Position(gen.TokPos).Line+1 < fset.Position(spec.Pos()).Line {
fset.File(gen.TokPos).MergeLine(fset.Position(gen.TokPos).Line)
}
break
}
}
}
if j > 0 {
lastImpspec := gen.Specs[j-1].(*ast.ImportSpec)
lastLine := fset.Position(lastImpspec.Path.ValuePos).Line
line := fset.Position(impspec.Path.ValuePos).Line
// We deleted an entry but now there may be
// a blank line-sized hole where the import was.
if line-lastLine > 1 {
// There was a blank line immediately preceding the deleted import,
// so there's no need to close the hole.
// Do nothing.
} else if line != fset.File(gen.Rparen).LineCount() {
// There was no blank line. Close the hole.
fset.File(gen.Rparen).MergeLine(line)
}
}
j--
}
}
// Delete imports from f.Imports.
for i := 0; i < len(f.Imports); i++ {
imp := f.Imports[i]
for j, del := range delspecs {
if imp == del {
copy(f.Imports[i:], f.Imports[i+1:])
f.Imports = f.Imports[:len(f.Imports)-1]
copy(delspecs[j:], delspecs[j+1:])
delspecs = delspecs[:len(delspecs)-1]
i--
break
}
}
}
// Delete comments from f.Comments.
for i := 0; i < len(f.Comments); i++ {
cg := f.Comments[i]
for j, del := range delcomments {
if cg == del {
copy(f.Comments[i:], f.Comments[i+1:])
f.Comments = f.Comments[:len(f.Comments)-1]
copy(delcomments[j:], delcomments[j+1:])
delcomments = delcomments[:len(delcomments)-1]
i--
break
}
}
}
if len(delspecs) > 0 {
panic(fmt.Sprintf("deleted specs from Decls but not Imports: %v", delspecs))
}
return
}
// RewriteImport rewrites any import of path oldPath to path newPath.
func RewriteImport(fset *token.FileSet, f *ast.File, oldPath, newPath string) (rewrote bool) {
for _, imp := range f.Imports {
if importPath(imp) == oldPath {
rewrote = true
// record old End, because the default is to compute
// it using the length of imp.Path.Value.
imp.EndPos = imp.End()
imp.Path.Value = strconv.Quote(newPath)
}
}
return
}
// UsesImport reports whether a given import is used.
func UsesImport(f *ast.File, path string) (used bool) {
spec := importSpec(f, path)
if spec == nil {
return
}
name := spec.Name.String()
switch name {
case "<nil>":
// If the package name is not explicitly specified,
// make an educated guess. This is not guaranteed to be correct.
lastSlash := strings.LastIndex(path, "/")
if lastSlash == -1 {
name = path
} else {
name = path[lastSlash+1:]
}
case "_", ".":
// Not sure if this import is used - err on the side of caution.
return true
}
ast.Walk(visitFn(func(n ast.Node) {
sel, ok := n.(*ast.SelectorExpr)
if ok && isTopName(sel.X, name) {
used = true
}
}), f)
return
}
type visitFn func(node ast.Node)
func (fn visitFn) Visit(node ast.Node) ast.Visitor {
fn(node)
return fn
}
// imports returns true if f imports path.
func imports(f *ast.File, path string) bool {
return importSpec(f, path) != nil
}
// importSpec returns the import spec if f imports path,
// or nil otherwise.
func importSpec(f *ast.File, path string) *ast.ImportSpec {
for _, s := range f.Imports {
if importPath(s) == path {
return s
}
}
return nil
}
// importPath returns the unquoted import path of s,
// or "" if the path is not properly quoted.
func importPath(s *ast.ImportSpec) string {
t, err := strconv.Unquote(s.Path.Value)
if err == nil {
return t
}
return ""
}
// declImports reports whether gen contains an import of path.
func declImports(gen *ast.GenDecl, path string) bool {
if gen.Tok != token.IMPORT {
return false
}
for _, spec := range gen.Specs {
impspec := spec.(*ast.ImportSpec)
if importPath(impspec) == path {
return true
}
}
return false
}
// matchLen returns the length of the longest path segment prefix shared by x and y.
func matchLen(x, y string) int {
n := 0
for i := 0; i < len(x) && i < len(y) && x[i] == y[i]; i++ {
if x[i] == '/' {
n++
}
}
return n
}
// isTopName returns true if n is a top-level unresolved identifier with the given name.
func isTopName(n ast.Expr, name string) bool {
id, ok := n.(*ast.Ident)
return ok && id.Name == name && id.Obj == nil
}
// Imports returns the file imports grouped by paragraph.
func Imports(fset *token.FileSet, f *ast.File) [][]*ast.ImportSpec {
var groups [][]*ast.ImportSpec
for _, decl := range f.Decls {
genDecl, ok := decl.(*ast.GenDecl)
if !ok || genDecl.Tok != token.IMPORT {
break
}
group := []*ast.ImportSpec{}
var lastLine int
for _, spec := range genDecl.Specs {
importSpec := spec.(*ast.ImportSpec)
pos := importSpec.Path.ValuePos
line := fset.Position(pos).Line
if lastLine > 0 && pos > 0 && line-lastLine > 1 {
groups = append(groups, group)
group = []*ast.ImportSpec{}
}
group = append(group, importSpec)
lastLine = line
}
groups = append(groups, group)
}
return groups
}
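// ---- Editor's sketch (not part of the original file) ----
// A minimal, runnable use of AddNamedImport on a parsed file; the file
// name "demo.go" and the source text are illustrative assumptions.
package main

import (
	"go/format"
	"go/parser"
	"go/token"
	"os"

	"golang.org/x/tools/go/ast/astutil"
)

func main() {
	fset := token.NewFileSet()
	src := "package demo\n\nimport \"fmt\"\n\nfunc main() { fmt.Println(\"hi\") }\n"
	f, err := parser.ParseFile(fset, "demo.go", src, parser.ParseComments)
	if err != nil {
		panic(err)
	}
	// AddNamedImport reports whether it modified f; here it inserts
	// `import pathpkg "path"` into the existing import declaration.
	if astutil.AddNamedImport(fset, f, "pathpkg", "path") {
		format.Node(os.Stdout, fset, f) // print the rewritten file
	}
}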

View file

@@ -1,477 +0,0 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package astutil
import (
"fmt"
"go/ast"
"reflect"
"sort"
)
// An ApplyFunc is invoked by Apply for each node n, even if n is nil,
// before and/or after the node's children, using a Cursor describing
// the current node and providing operations on it.
//
// The return value of ApplyFunc controls the syntax tree traversal.
// See Apply for details.
type ApplyFunc func(*Cursor) bool
// Apply traverses a syntax tree recursively, starting with root,
// and calling pre and post for each node as described below.
// Apply returns the syntax tree, possibly modified.
//
// If pre is not nil, it is called for each node before the node's
// children are traversed (pre-order). If pre returns false, no
// children are traversed, and post is not called for that node.
//
// If post is not nil, and a prior call of pre didn't return false,
// post is called for each node after its children are traversed
// (post-order). If post returns false, traversal is terminated and
// Apply returns immediately.
//
// Only fields that refer to AST nodes are considered children;
// i.e., token.Pos, Scopes, Objects, and fields of basic types
// (strings, etc.) are ignored.
//
// Children are traversed in the order in which they appear in the
// respective node's struct definition. A package's files are
// traversed in the filenames' alphabetical order.
//
func Apply(root ast.Node, pre, post ApplyFunc) (result ast.Node) {
parent := &struct{ ast.Node }{root}
defer func() {
if r := recover(); r != nil && r != abort {
panic(r)
}
result = parent.Node
}()
a := &application{pre: pre, post: post}
a.apply(parent, "Node", nil, root)
return
}
var abort = new(int) // singleton, to signal termination of Apply
// A Cursor describes a node encountered during Apply.
// Information about the node and its parent is available
// from the Node, Parent, Name, and Index methods.
//
// If p is a variable of type and value of the current parent node
// c.Parent(), and f is the field identifier with name c.Name(),
// the following invariants hold:
//
// p.f == c.Node() if c.Index() < 0
// p.f[c.Index()] == c.Node() if c.Index() >= 0
//
// The methods Replace, Delete, InsertBefore, and InsertAfter
// can be used to change the AST without disrupting Apply.
type Cursor struct {
parent ast.Node
name string
iter *iterator // valid if non-nil
node ast.Node
}
// Node returns the current Node.
func (c *Cursor) Node() ast.Node { return c.node }
// Parent returns the parent of the current Node.
func (c *Cursor) Parent() ast.Node { return c.parent }
// Name returns the name of the parent Node field that contains the current Node.
// If the parent is a *ast.Package and the current Node is a *ast.File, Name returns
// the filename for the current Node.
func (c *Cursor) Name() string { return c.name }
// Index reports the index >= 0 of the current Node in the slice of Nodes that
// contains it, or a value < 0 if the current Node is not part of a slice.
// The index of the current node changes if InsertBefore is called while
// processing the current node.
func (c *Cursor) Index() int {
if c.iter != nil {
return c.iter.index
}
return -1
}
// field returns the current node's parent field value.
func (c *Cursor) field() reflect.Value {
return reflect.Indirect(reflect.ValueOf(c.parent)).FieldByName(c.name)
}
// Replace replaces the current Node with n.
// The replacement node is not walked by Apply.
func (c *Cursor) Replace(n ast.Node) {
if _, ok := c.node.(*ast.File); ok {
file, ok := n.(*ast.File)
if !ok {
panic("attempt to replace *ast.File with non-*ast.File")
}
c.parent.(*ast.Package).Files[c.name] = file
return
}
v := c.field()
if i := c.Index(); i >= 0 {
v = v.Index(i)
}
v.Set(reflect.ValueOf(n))
}
// Delete deletes the current Node from its containing slice.
// If the current Node is not part of a slice, Delete panics.
// As a special case, if the current node is a package file,
// Delete removes it from the package's Files map.
func (c *Cursor) Delete() {
if _, ok := c.node.(*ast.File); ok {
delete(c.parent.(*ast.Package).Files, c.name)
return
}
i := c.Index()
if i < 0 {
panic("Delete node not contained in slice")
}
v := c.field()
l := v.Len()
reflect.Copy(v.Slice(i, l), v.Slice(i+1, l))
v.Index(l - 1).Set(reflect.Zero(v.Type().Elem()))
v.SetLen(l - 1)
c.iter.step--
}
// InsertAfter inserts n after the current Node in its containing slice.
// If the current Node is not part of a slice, InsertAfter panics.
// Apply does not walk n.
func (c *Cursor) InsertAfter(n ast.Node) {
i := c.Index()
if i < 0 {
panic("InsertAfter node not contained in slice")
}
v := c.field()
v.Set(reflect.Append(v, reflect.Zero(v.Type().Elem())))
l := v.Len()
reflect.Copy(v.Slice(i+2, l), v.Slice(i+1, l))
v.Index(i + 1).Set(reflect.ValueOf(n))
c.iter.step++
}
// InsertBefore inserts n before the current Node in its containing slice.
// If the current Node is not part of a slice, InsertBefore panics.
// Apply will not walk n.
func (c *Cursor) InsertBefore(n ast.Node) {
i := c.Index()
if i < 0 {
panic("InsertBefore node not contained in slice")
}
v := c.field()
v.Set(reflect.Append(v, reflect.Zero(v.Type().Elem())))
l := v.Len()
reflect.Copy(v.Slice(i+1, l), v.Slice(i, l))
v.Index(i).Set(reflect.ValueOf(n))
c.iter.index++
}
// application carries all the shared data so we can pass it around cheaply.
type application struct {
pre, post ApplyFunc
cursor Cursor
iter iterator
}
func (a *application) apply(parent ast.Node, name string, iter *iterator, n ast.Node) {
// convert typed nil into untyped nil
if v := reflect.ValueOf(n); v.Kind() == reflect.Ptr && v.IsNil() {
n = nil
}
// avoid heap-allocating a new cursor for each apply call; reuse a.cursor instead
saved := a.cursor
a.cursor.parent = parent
a.cursor.name = name
a.cursor.iter = iter
a.cursor.node = n
if a.pre != nil && !a.pre(&a.cursor) {
a.cursor = saved
return
}
// walk children
// (the order of the cases matches the order of the corresponding node types in go/ast)
switch n := n.(type) {
case nil:
// nothing to do
// Comments and fields
case *ast.Comment:
// nothing to do
case *ast.CommentGroup:
if n != nil {
a.applyList(n, "List")
}
case *ast.Field:
a.apply(n, "Doc", nil, n.Doc)
a.applyList(n, "Names")
a.apply(n, "Type", nil, n.Type)
a.apply(n, "Tag", nil, n.Tag)
a.apply(n, "Comment", nil, n.Comment)
case *ast.FieldList:
a.applyList(n, "List")
// Expressions
case *ast.BadExpr, *ast.Ident, *ast.BasicLit:
// nothing to do
case *ast.Ellipsis:
a.apply(n, "Elt", nil, n.Elt)
case *ast.FuncLit:
a.apply(n, "Type", nil, n.Type)
a.apply(n, "Body", nil, n.Body)
case *ast.CompositeLit:
a.apply(n, "Type", nil, n.Type)
a.applyList(n, "Elts")
case *ast.ParenExpr:
a.apply(n, "X", nil, n.X)
case *ast.SelectorExpr:
a.apply(n, "X", nil, n.X)
a.apply(n, "Sel", nil, n.Sel)
case *ast.IndexExpr:
a.apply(n, "X", nil, n.X)
a.apply(n, "Index", nil, n.Index)
case *ast.SliceExpr:
a.apply(n, "X", nil, n.X)
a.apply(n, "Low", nil, n.Low)
a.apply(n, "High", nil, n.High)
a.apply(n, "Max", nil, n.Max)
case *ast.TypeAssertExpr:
a.apply(n, "X", nil, n.X)
a.apply(n, "Type", nil, n.Type)
case *ast.CallExpr:
a.apply(n, "Fun", nil, n.Fun)
a.applyList(n, "Args")
case *ast.StarExpr:
a.apply(n, "X", nil, n.X)
case *ast.UnaryExpr:
a.apply(n, "X", nil, n.X)
case *ast.BinaryExpr:
a.apply(n, "X", nil, n.X)
a.apply(n, "Y", nil, n.Y)
case *ast.KeyValueExpr:
a.apply(n, "Key", nil, n.Key)
a.apply(n, "Value", nil, n.Value)
// Types
case *ast.ArrayType:
a.apply(n, "Len", nil, n.Len)
a.apply(n, "Elt", nil, n.Elt)
case *ast.StructType:
a.apply(n, "Fields", nil, n.Fields)
case *ast.FuncType:
a.apply(n, "Params", nil, n.Params)
a.apply(n, "Results", nil, n.Results)
case *ast.InterfaceType:
a.apply(n, "Methods", nil, n.Methods)
case *ast.MapType:
a.apply(n, "Key", nil, n.Key)
a.apply(n, "Value", nil, n.Value)
case *ast.ChanType:
a.apply(n, "Value", nil, n.Value)
// Statements
case *ast.BadStmt:
// nothing to do
case *ast.DeclStmt:
a.apply(n, "Decl", nil, n.Decl)
case *ast.EmptyStmt:
// nothing to do
case *ast.LabeledStmt:
a.apply(n, "Label", nil, n.Label)
a.apply(n, "Stmt", nil, n.Stmt)
case *ast.ExprStmt:
a.apply(n, "X", nil, n.X)
case *ast.SendStmt:
a.apply(n, "Chan", nil, n.Chan)
a.apply(n, "Value", nil, n.Value)
case *ast.IncDecStmt:
a.apply(n, "X", nil, n.X)
case *ast.AssignStmt:
a.applyList(n, "Lhs")
a.applyList(n, "Rhs")
case *ast.GoStmt:
a.apply(n, "Call", nil, n.Call)
case *ast.DeferStmt:
a.apply(n, "Call", nil, n.Call)
case *ast.ReturnStmt:
a.applyList(n, "Results")
case *ast.BranchStmt:
a.apply(n, "Label", nil, n.Label)
case *ast.BlockStmt:
a.applyList(n, "List")
case *ast.IfStmt:
a.apply(n, "Init", nil, n.Init)
a.apply(n, "Cond", nil, n.Cond)
a.apply(n, "Body", nil, n.Body)
a.apply(n, "Else", nil, n.Else)
case *ast.CaseClause:
a.applyList(n, "List")
a.applyList(n, "Body")
case *ast.SwitchStmt:
a.apply(n, "Init", nil, n.Init)
a.apply(n, "Tag", nil, n.Tag)
a.apply(n, "Body", nil, n.Body)
case *ast.TypeSwitchStmt:
a.apply(n, "Init", nil, n.Init)
a.apply(n, "Assign", nil, n.Assign)
a.apply(n, "Body", nil, n.Body)
case *ast.CommClause:
a.apply(n, "Comm", nil, n.Comm)
a.applyList(n, "Body")
case *ast.SelectStmt:
a.apply(n, "Body", nil, n.Body)
case *ast.ForStmt:
a.apply(n, "Init", nil, n.Init)
a.apply(n, "Cond", nil, n.Cond)
a.apply(n, "Post", nil, n.Post)
a.apply(n, "Body", nil, n.Body)
case *ast.RangeStmt:
a.apply(n, "Key", nil, n.Key)
a.apply(n, "Value", nil, n.Value)
a.apply(n, "X", nil, n.X)
a.apply(n, "Body", nil, n.Body)
// Declarations
case *ast.ImportSpec:
a.apply(n, "Doc", nil, n.Doc)
a.apply(n, "Name", nil, n.Name)
a.apply(n, "Path", nil, n.Path)
a.apply(n, "Comment", nil, n.Comment)
case *ast.ValueSpec:
a.apply(n, "Doc", nil, n.Doc)
a.applyList(n, "Names")
a.apply(n, "Type", nil, n.Type)
a.applyList(n, "Values")
a.apply(n, "Comment", nil, n.Comment)
case *ast.TypeSpec:
a.apply(n, "Doc", nil, n.Doc)
a.apply(n, "Name", nil, n.Name)
a.apply(n, "Type", nil, n.Type)
a.apply(n, "Comment", nil, n.Comment)
case *ast.BadDecl:
// nothing to do
case *ast.GenDecl:
a.apply(n, "Doc", nil, n.Doc)
a.applyList(n, "Specs")
case *ast.FuncDecl:
a.apply(n, "Doc", nil, n.Doc)
a.apply(n, "Recv", nil, n.Recv)
a.apply(n, "Name", nil, n.Name)
a.apply(n, "Type", nil, n.Type)
a.apply(n, "Body", nil, n.Body)
// Files and packages
case *ast.File:
a.apply(n, "Doc", nil, n.Doc)
a.apply(n, "Name", nil, n.Name)
a.applyList(n, "Decls")
// Don't walk n.Comments; they have either been walked already if
// they are Doc comments, or they can be easily walked explicitly.
case *ast.Package:
// collect and sort names for reproducible behavior
var names []string
for name := range n.Files {
names = append(names, name)
}
sort.Strings(names)
for _, name := range names {
a.apply(n, name, nil, n.Files[name])
}
default:
panic(fmt.Sprintf("Apply: unexpected node type %T", n))
}
if a.post != nil && !a.post(&a.cursor) {
panic(abort)
}
a.cursor = saved
}
// An iterator controls iteration over a slice of nodes.
type iterator struct {
index, step int
}
func (a *application) applyList(parent ast.Node, name string) {
// avoid heap-allocating a new iterator for each applyList call; reuse a.iter instead
saved := a.iter
a.iter.index = 0
for {
// must reload parent.name each time, since cursor modifications might change it
v := reflect.Indirect(reflect.ValueOf(parent)).FieldByName(name)
if a.iter.index >= v.Len() {
break
}
// element x may be nil in a bad AST - be cautious
var x ast.Node
if e := v.Index(a.iter.index); e.IsValid() {
x = e.Interface().(ast.Node)
}
a.iter.step = 1
a.apply(parent, name, &a.iter, x)
a.iter.index += a.iter.step
}
a.iter = saved
}
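// ---- Editor's sketch (not part of the original file) ----
// A minimal, runnable use of Apply with Cursor.Replace: every identifier
// named "foo" becomes "bar". The source string and names are assumptions.
package main

import (
	"go/ast"
	"go/format"
	"go/parser"
	"go/token"
	"os"

	"golang.org/x/tools/go/ast/astutil"
)

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "demo.go", "package p\nvar foo = 1\nvar baz = foo\n", 0)
	if err != nil {
		panic(err)
	}
	astutil.Apply(f, func(c *astutil.Cursor) bool {
		if id, ok := c.Node().(*ast.Ident); ok && id.Name == "foo" {
			c.Replace(ast.NewIdent("bar")) // the replacement node is not re-walked
		}
		return true // keep traversing this node's children
	}, nil)
	format.Node(os.Stdout, fset, f) // prints the rewritten file
}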

View file

@@ -1,14 +0,0 @@
package astutil
import "go/ast"
// Unparen returns e with any enclosing parentheses stripped.
func Unparen(e ast.Expr) ast.Expr {
for {
p, ok := e.(*ast.ParenExpr)
if !ok {
return e
}
e = p.X
}
}
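// ---- Editor's sketch (not part of the original file) ----
// Unparen strips any number of enclosing parentheses, e.g. ((x+y)) -> x+y.
package main

import (
	"fmt"
	"go/parser"
	"go/types"

	"golang.org/x/tools/go/ast/astutil"
)

func main() {
	e, err := parser.ParseExpr("((x + y))")
	if err != nil {
		panic(err)
	}
	fmt.Println(types.ExprString(astutil.Unparen(e))) // prints: x + y
}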

View file

@@ -1,198 +0,0 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package buildutil provides utilities related to the go/build
// package in the standard library.
//
// All I/O is done via the build.Context file system interface, which must
// be concurrency-safe.
package buildutil // import "golang.org/x/tools/go/buildutil"
import (
"go/build"
"os"
"path/filepath"
"sort"
"strings"
"sync"
)
// AllPackages returns the package path of each Go package in any source
// directory of the specified build context (e.g. $GOROOT or an element
// of $GOPATH). Errors are ignored. The results are sorted.
// All package paths are canonical, and thus may contain "/vendor/".
//
// The result may include import paths for directories that contain no
// *.go files, such as "archive" (in $GOROOT/src).
//
// All I/O is done via the build.Context file system interface,
// which must be concurrency-safe.
//
func AllPackages(ctxt *build.Context) []string {
var list []string
ForEachPackage(ctxt, func(pkg string, _ error) {
list = append(list, pkg)
})
sort.Strings(list)
return list
}
// ForEachPackage calls the found function with the package path of
// each Go package it finds in any source directory of the specified
// build context (e.g. $GOROOT or an element of $GOPATH).
// All package paths are canonical, and thus may contain "/vendor/".
//
// If the package directory exists but could not be read, the second
// argument to the found function provides the error.
//
// All I/O is done via the build.Context file system interface,
// which must be concurrency-safe.
//
func ForEachPackage(ctxt *build.Context, found func(importPath string, err error)) {
ch := make(chan item)
var wg sync.WaitGroup
for _, root := range ctxt.SrcDirs() {
root := root
wg.Add(1)
go func() {
allPackages(ctxt, root, ch)
wg.Done()
}()
}
go func() {
wg.Wait()
close(ch)
}()
// All calls to found occur in the caller's goroutine.
for i := range ch {
found(i.importPath, i.err)
}
}
type item struct {
importPath string
err error // (optional)
}
// We use a process-wide counting semaphore to limit
// the number of parallel calls to ReadDir.
var ioLimit = make(chan bool, 20)
func allPackages(ctxt *build.Context, root string, ch chan<- item) {
root = filepath.Clean(root) + string(os.PathSeparator)
var wg sync.WaitGroup
var walkDir func(dir string)
walkDir = func(dir string) {
// Avoid .foo, _foo, and testdata directory trees.
base := filepath.Base(dir)
if base == "" || base[0] == '.' || base[0] == '_' || base == "testdata" {
return
}
pkg := filepath.ToSlash(strings.TrimPrefix(dir, root))
// Prune search if we encounter any of these import paths.
switch pkg {
case "builtin":
return
}
ioLimit <- true
files, err := ReadDir(ctxt, dir)
<-ioLimit
if pkg != "" || err != nil {
ch <- item{pkg, err}
}
for _, fi := range files {
fi := fi
if fi.IsDir() {
wg.Add(1)
go func() {
walkDir(filepath.Join(dir, fi.Name()))
wg.Done()
}()
}
}
}
walkDir(root)
wg.Wait()
}
// ExpandPatterns returns the set of packages matched by patterns,
// which may have the following forms:
//
// golang.org/x/tools/cmd/guru # a single package
// golang.org/x/tools/... # all packages beneath dir
// ... # the entire workspace.
//
// Order is significant: a pattern preceded by '-' removes matching
// packages from the set. For example, these patterns match all encoding
// packages except encoding/xml:
//
// encoding/... -encoding/xml
//
// A trailing slash in a pattern is ignored. (Path components of Go
// package names are separated by slash, not the platform's path separator.)
//
func ExpandPatterns(ctxt *build.Context, patterns []string) map[string]bool {
// TODO(adonovan): support other features of 'go list':
// - "std"/"cmd"/"all" meta-packages
// - "..." not at the end of a pattern
// - relative patterns using "./" or "../" prefix
pkgs := make(map[string]bool)
doPkg := func(pkg string, neg bool) {
if neg {
delete(pkgs, pkg)
} else {
pkgs[pkg] = true
}
}
// Scan entire workspace if wildcards are present.
// TODO(adonovan): opt: scan only the necessary subtrees of the workspace.
var all []string
for _, arg := range patterns {
if strings.HasSuffix(arg, "...") {
all = AllPackages(ctxt)
break
}
}
for _, arg := range patterns {
if arg == "" {
continue
}
neg := arg[0] == '-'
if neg {
arg = arg[1:]
}
if arg == "..." {
// ... matches all packages
for _, pkg := range all {
doPkg(pkg, neg)
}
} else if dir := strings.TrimSuffix(arg, "/..."); dir != arg {
// dir/... matches all packages beneath dir
for _, pkg := range all {
if strings.HasPrefix(pkg, dir) &&
(len(pkg) == len(dir) || pkg[len(dir)] == '/') {
doPkg(pkg, neg)
}
}
} else {
// single package
doPkg(strings.TrimSuffix(arg, "/"), neg)
}
}
return pkgs
}
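// ---- Editor's sketch (not part of the original file) ----
// Expanding "go list"-style patterns against the default build context.
// The patterns are illustrative; a "..." wildcard scans the whole
// $GOROOT/$GOPATH workspace, which can be slow.
package main

import (
	"fmt"
	"go/build"

	"golang.org/x/tools/go/buildutil"
)

func main() {
	pkgs := buildutil.ExpandPatterns(&build.Default, []string{"encoding/...", "-encoding/xml"})
	for pkg := range pkgs {
		fmt.Println(pkg) // every encoding package except encoding/xml
	}
}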

View file

@@ -1,109 +0,0 @@
package buildutil
import (
"fmt"
"go/build"
"io"
"io/ioutil"
"os"
"path"
"path/filepath"
"sort"
"strings"
"time"
)
// FakeContext returns a build.Context for the fake file tree specified
// by pkgs, which maps package import paths to a mapping from file base
// names to contents.
//
// The fake Context has a GOROOT of "/go" and no GOPATH, and overrides
// the necessary file access methods to read from memory instead of the
// real file system.
//
// Unlike a real file tree, the fake one has only two levels---packages
// and files---so ReadDir("/go/src/") returns all packages under
// /go/src/ including, for instance, "math" and "math/big".
// ReadDir("/go/src/math/big") would return all the files in the
// "math/big" package.
//
func FakeContext(pkgs map[string]map[string]string) *build.Context {
clean := func(filename string) string {
f := path.Clean(filepath.ToSlash(filename))
// Removing "/go/src" while respecting segment
// boundaries has this unfortunate corner case:
if f == "/go/src" {
return ""
}
return strings.TrimPrefix(f, "/go/src/")
}
ctxt := build.Default // copy
ctxt.GOROOT = "/go"
ctxt.GOPATH = ""
ctxt.Compiler = "gc"
ctxt.IsDir = func(dir string) bool {
dir = clean(dir)
if dir == "" {
return true // needed by (*build.Context).SrcDirs
}
return pkgs[dir] != nil
}
ctxt.ReadDir = func(dir string) ([]os.FileInfo, error) {
dir = clean(dir)
var fis []os.FileInfo
if dir == "" {
// enumerate packages
for importPath := range pkgs {
fis = append(fis, fakeDirInfo(importPath))
}
} else {
// enumerate files of package
for basename := range pkgs[dir] {
fis = append(fis, fakeFileInfo(basename))
}
}
sort.Sort(byName(fis))
return fis, nil
}
ctxt.OpenFile = func(filename string) (io.ReadCloser, error) {
filename = clean(filename)
dir, base := path.Split(filename)
content, ok := pkgs[path.Clean(dir)][base]
if !ok {
return nil, fmt.Errorf("file not found: %s", filename)
}
return ioutil.NopCloser(strings.NewReader(content)), nil
}
ctxt.IsAbsPath = func(path string) bool {
path = filepath.ToSlash(path)
// Don't rely on the default (filepath.IsAbs) since on
// Windows, it reports virtual paths as non-absolute.
return strings.HasPrefix(path, "/")
}
return &ctxt
}
type byName []os.FileInfo
func (s byName) Len() int { return len(s) }
func (s byName) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s byName) Less(i, j int) bool { return s[i].Name() < s[j].Name() }
type fakeFileInfo string
func (fi fakeFileInfo) Name() string { return string(fi) }
func (fakeFileInfo) Sys() interface{} { return nil }
func (fakeFileInfo) ModTime() time.Time { return time.Time{} }
func (fakeFileInfo) IsDir() bool { return false }
func (fakeFileInfo) Size() int64 { return 0 }
func (fakeFileInfo) Mode() os.FileMode { return 0644 }
type fakeDirInfo string
func (fd fakeDirInfo) Name() string { return string(fd) }
func (fakeDirInfo) Sys() interface{} { return nil }
func (fakeDirInfo) ModTime() time.Time { return time.Time{} }
func (fakeDirInfo) IsDir() bool { return true }
func (fakeDirInfo) Size() int64 { return 0 }
func (fakeDirInfo) Mode() os.FileMode { return 0755 }
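// ---- Editor's sketch (not part of the original file) ----
// Importing a package from the in-memory tree; the package path and file
// contents below are assumptions. FakeContext roots everything at /go/src.
package main

import (
	"fmt"

	"golang.org/x/tools/go/buildutil"
)

func main() {
	ctxt := buildutil.FakeContext(map[string]map[string]string{
		"math/mini": {"mini.go": "package mini\n"},
	})
	bp, err := ctxt.Import("math/mini", "/go/src", 0)
	if err != nil {
		panic(err)
	}
	fmt.Println(bp.Name, bp.Dir) // mini /go/src/math/mini
}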

View file

@@ -1,103 +0,0 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package buildutil
import (
"bufio"
"bytes"
"fmt"
"go/build"
"io"
"io/ioutil"
"path/filepath"
"strconv"
"strings"
)
// OverlayContext overlays a build.Context with additional files from
// a map. Files in the map take precedence over other files.
//
// In addition to plain string comparison, two file names are
// considered equal if their base names match and their directory
// components point at the same directory on the file system. That is,
// symbolic links are followed for directories, but not files.
//
// A common use case for OverlayContext is to allow editors to pass in
// a set of unsaved, modified files.
//
// Currently, only the Context.OpenFile function will respect the
// overlay. This may change in the future.
func OverlayContext(orig *build.Context, overlay map[string][]byte) *build.Context {
// TODO(dominikh): Implement IsDir, HasSubdir and ReadDir
rc := func(data []byte) (io.ReadCloser, error) {
return ioutil.NopCloser(bytes.NewBuffer(data)), nil
}
copy := *orig // make a copy
ctxt := &copy
ctxt.OpenFile = func(path string) (io.ReadCloser, error) {
// Fast path: names match exactly.
if content, ok := overlay[path]; ok {
return rc(content)
}
// Slow path: check for same file under a different
// alias, perhaps due to a symbolic link.
for filename, content := range overlay {
if sameFile(path, filename) {
return rc(content)
}
}
return OpenFile(orig, path)
}
return ctxt
}
// ParseOverlayArchive parses an archive containing Go files and their
// contents. The result is intended to be used with OverlayContext.
//
//
// Archive format
//
// The archive consists of a series of files. Each file consists of a
// name, a decimal file size and the file contents, separated by
// newlines. No newline follows the file contents.
func ParseOverlayArchive(archive io.Reader) (map[string][]byte, error) {
overlay := make(map[string][]byte)
r := bufio.NewReader(archive)
for {
// Read file name.
filename, err := r.ReadString('\n')
if err != nil {
if err == io.EOF {
break // OK
}
return nil, fmt.Errorf("reading archive file name: %v", err)
}
filename = filepath.Clean(strings.TrimSpace(filename))
// Read file size.
sz, err := r.ReadString('\n')
if err != nil {
return nil, fmt.Errorf("reading size of archive file %s: %v", filename, err)
}
sz = strings.TrimSpace(sz)
size, err := strconv.ParseUint(sz, 10, 32)
if err != nil {
return nil, fmt.Errorf("parsing size of archive file %s: %v", filename, err)
}
// Read file content.
content := make([]byte, size)
if _, err := io.ReadFull(r, content); err != nil {
return nil, fmt.Errorf("reading archive file %s: %v", filename, err)
}
overlay[filename] = content
}
return overlay, nil
}
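// ---- Editor's sketch (not part of the original file) ----
// Serving one unsaved file from memory; only OpenFile consults the overlay,
// as the doc comment above notes. The path below is a hypothetical example.
package main

import (
	"fmt"
	"go/build"
	"io/ioutil"

	"golang.org/x/tools/go/buildutil"
)

func main() {
	overlay := map[string][]byte{
		"/work/demo/main.go": []byte("package main\nfunc main() {}\n"),
	}
	ctxt := buildutil.OverlayContext(&build.Default, overlay)
	rc, err := ctxt.OpenFile("/work/demo/main.go")
	if err != nil {
		panic(err)
	}
	defer rc.Close()
	src, _ := ioutil.ReadAll(rc)
	fmt.Printf("%s", src)
}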

View file

@@ -1,75 +0,0 @@
package buildutil
// This logic was copied from stringsFlag from $GOROOT/src/cmd/go/build.go.
import "fmt"
const TagsFlagDoc = "a list of `build tags` to consider satisfied during the build. " +
"For more information about build tags, see the description of " +
"build constraints in the documentation for the go/build package"
// TagsFlag is an implementation of the flag.Value and flag.Getter interfaces that parses
// a flag value in the same manner as go build's -tags flag and
// populates a []string slice.
//
// See $GOROOT/src/go/build/doc.go for description of build tags.
// See $GOROOT/src/cmd/go/doc.go for description of 'go build -tags' flag.
//
// Example:
// flag.Var((*buildutil.TagsFlag)(&build.Default.BuildTags), "tags", buildutil.TagsFlagDoc)
type TagsFlag []string
func (v *TagsFlag) Set(s string) error {
var err error
*v, err = splitQuotedFields(s)
if *v == nil {
*v = []string{}
}
return err
}
func (v *TagsFlag) Get() interface{} { return *v }
func splitQuotedFields(s string) ([]string, error) {
// Split fields allowing '' or "" around elements.
// Quotes further inside the string do not count.
var f []string
for len(s) > 0 {
for len(s) > 0 && isSpaceByte(s[0]) {
s = s[1:]
}
if len(s) == 0 {
break
}
// Accepted quoted string. No unescaping inside.
if s[0] == '"' || s[0] == '\'' {
quote := s[0]
s = s[1:]
i := 0
for i < len(s) && s[i] != quote {
i++
}
if i >= len(s) {
return nil, fmt.Errorf("unterminated %c string", quote)
}
f = append(f, s[:i])
s = s[i+1:]
continue
}
i := 0
for i < len(s) && !isSpaceByte(s[i]) {
i++
}
f = append(f, s[:i])
s = s[i:]
}
return f, nil
}
func (v *TagsFlag) String() string {
return "<tagsFlag>"
}
func isSpaceByte(c byte) bool {
return c == ' ' || c == '\t' || c == '\n' || c == '\r'
}
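// ---- Editor's sketch (not part of the original file) ----
// Wiring TagsFlag into the flag package, exactly as the doc comment above
// suggests; run with e.g. -tags 'netgo osusergo' (tag names are examples).
package main

import (
	"flag"
	"fmt"
	"go/build"

	"golang.org/x/tools/go/buildutil"
)

func main() {
	flag.Var((*buildutil.TagsFlag)(&build.Default.BuildTags), "tags", buildutil.TagsFlagDoc)
	flag.Parse()
	fmt.Println(build.Default.BuildTags) // e.g. [netgo osusergo]
}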

View file

@@ -1,212 +0,0 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package buildutil
import (
"fmt"
"go/ast"
"go/build"
"go/parser"
"go/token"
"io"
"io/ioutil"
"os"
"path"
"path/filepath"
"strings"
)
// ParseFile behaves like parser.ParseFile,
// but uses the build context's file system interface, if any.
//
// If file is not absolute (as defined by IsAbsPath), the (dir, file)
// components are joined using JoinPath; dir must be absolute.
//
// The displayPath function, if provided, is used to transform the
// filename that will be attached to the ASTs.
//
// TODO(adonovan): call this from go/loader.parseFiles when the tree thaws.
//
func ParseFile(fset *token.FileSet, ctxt *build.Context, displayPath func(string) string, dir string, file string, mode parser.Mode) (*ast.File, error) {
if !IsAbsPath(ctxt, file) {
file = JoinPath(ctxt, dir, file)
}
rd, err := OpenFile(ctxt, file)
if err != nil {
return nil, err
}
defer rd.Close() // ignore error
if displayPath != nil {
file = displayPath(file)
}
return parser.ParseFile(fset, file, rd, mode)
}
// ContainingPackage returns the package containing filename.
//
// If filename is not absolute, it is interpreted relative to working directory dir.
// All I/O is via the build context's file system interface, if any.
//
// The '...Files []string' fields of the resulting build.Package are not
// populated (build.FindOnly mode).
//
func ContainingPackage(ctxt *build.Context, dir, filename string) (*build.Package, error) {
if !IsAbsPath(ctxt, filename) {
filename = JoinPath(ctxt, dir, filename)
}
// We must not assume the file tree uses
// "/" always,
// `\` always,
// or os.PathSeparator (which varies by platform),
// but to make any progress, we are forced to assume that
// paths will not use `\` unless the PathSeparator
// is also `\`, thus we can rely on filepath.ToSlash for some sanity.
dirSlash := path.Dir(filepath.ToSlash(filename)) + "/"
// We assume that no source root (GOPATH[i] or GOROOT) contains any other.
for _, srcdir := range ctxt.SrcDirs() {
srcdirSlash := filepath.ToSlash(srcdir) + "/"
if importPath, ok := HasSubdir(ctxt, srcdirSlash, dirSlash); ok {
return ctxt.Import(importPath, dir, build.FindOnly)
}
}
return nil, fmt.Errorf("can't find package containing %s", filename)
}
// -- Effective methods of file system interface -------------------------
// (go/build.Context defines these as methods, but does not export them.)
// HasSubdir calls ctxt.HasSubdir (if not nil) or else uses
// the local file system to answer the question.
func HasSubdir(ctxt *build.Context, root, dir string) (rel string, ok bool) {
if f := ctxt.HasSubdir; f != nil {
return f(root, dir)
}
// Try using paths we received.
if rel, ok = hasSubdir(root, dir); ok {
return
}
// Try expanding symlinks and comparing
// expanded against unexpanded and
// expanded against expanded.
rootSym, _ := filepath.EvalSymlinks(root)
dirSym, _ := filepath.EvalSymlinks(dir)
if rel, ok = hasSubdir(rootSym, dir); ok {
return
}
if rel, ok = hasSubdir(root, dirSym); ok {
return
}
return hasSubdir(rootSym, dirSym)
}
func hasSubdir(root, dir string) (rel string, ok bool) {
const sep = string(filepath.Separator)
root = filepath.Clean(root)
if !strings.HasSuffix(root, sep) {
root += sep
}
dir = filepath.Clean(dir)
if !strings.HasPrefix(dir, root) {
return "", false
}
return filepath.ToSlash(dir[len(root):]), true
}
// FileExists returns true if the specified file exists,
// using the build context's file system interface.
func FileExists(ctxt *build.Context, path string) bool {
if ctxt.OpenFile != nil {
r, err := ctxt.OpenFile(path)
if err != nil {
return false
}
r.Close() // ignore error
return true
}
_, err := os.Stat(path)
return err == nil
}
// OpenFile behaves like os.Open,
// but uses the build context's file system interface, if any.
func OpenFile(ctxt *build.Context, path string) (io.ReadCloser, error) {
if ctxt.OpenFile != nil {
return ctxt.OpenFile(path)
}
return os.Open(path)
}
// IsAbsPath behaves like filepath.IsAbs,
// but uses the build context's file system interface, if any.
func IsAbsPath(ctxt *build.Context, path string) bool {
if ctxt.IsAbsPath != nil {
return ctxt.IsAbsPath(path)
}
return filepath.IsAbs(path)
}
// JoinPath behaves like filepath.Join,
// but uses the build context's file system interface, if any.
func JoinPath(ctxt *build.Context, path ...string) string {
if ctxt.JoinPath != nil {
return ctxt.JoinPath(path...)
}
return filepath.Join(path...)
}
// IsDir behaves like os.Stat plus IsDir,
// but uses the build context's file system interface, if any.
func IsDir(ctxt *build.Context, path string) bool {
if ctxt.IsDir != nil {
return ctxt.IsDir(path)
}
fi, err := os.Stat(path)
return err == nil && fi.IsDir()
}
// ReadDir behaves like ioutil.ReadDir,
// but uses the build context's file system interface, if any.
func ReadDir(ctxt *build.Context, path string) ([]os.FileInfo, error) {
if ctxt.ReadDir != nil {
return ctxt.ReadDir(path)
}
return ioutil.ReadDir(path)
}
// SplitPathList behaves like filepath.SplitList,
// but uses the build context's file system interface, if any.
func SplitPathList(ctxt *build.Context, s string) []string {
if ctxt.SplitPathList != nil {
return ctxt.SplitPathList(s)
}
return filepath.SplitList(s)
}
// sameFile returns true if x and y have the same basename and denote
// the same file.
//
func sameFile(x, y string) bool {
if path.Clean(x) == path.Clean(y) {
return true
}
if filepath.Base(x) == filepath.Base(y) { // (optimisation)
if xi, err := os.Stat(x); err == nil {
if yi, err := os.Stat(y); err == nil {
return os.SameFile(xi, yi)
}
}
}
return false
}
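// ---- Editor's sketch (not part of the original file) ----
// Locating the package that contains a file. "main.go" is an assumption and
// must live under a GOPATH/GOROOT source directory for this to succeed.
package main

import (
	"fmt"
	"go/build"
	"os"

	"golang.org/x/tools/go/buildutil"
)

func main() {
	wd, err := os.Getwd()
	if err != nil {
		panic(err)
	}
	bp, err := buildutil.ContainingPackage(&build.Default, wd, "main.go")
	if err != nil {
		panic(err)
	}
	fmt.Println(bp.ImportPath)
}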

View file

@@ -1,109 +0,0 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package gcexportdata provides functions for locating, reading, and
// writing export data files containing type information produced by the
// gc compiler. This package supports go1.7 export data format and all
// later versions.
//
// Although it might seem convenient for this package to live alongside
// go/types in the standard library, this would cause version skew
// problems for developer tools that use it, since they must be able to
// consume the outputs of the gc compiler both before and after a Go
// update such as from Go 1.7 to Go 1.8. Because this package lives in
// golang.org/x/tools, sites can update their version of this repo some
// time before the Go 1.8 release and rebuild and redeploy their
// developer tools, which will then be able to consume both Go 1.7 and
// Go 1.8 export data files, so they will work before and after the
// Go update. (See discussion at https://github.com/golang/go/issues/15651.)
//
package gcexportdata // import "golang.org/x/tools/go/gcexportdata"
import (
"bufio"
"bytes"
"fmt"
"go/token"
"go/types"
"io"
"io/ioutil"
"golang.org/x/tools/go/internal/gcimporter"
)
// Find returns the name of an object (.o) or archive (.a) file
// containing type information for the specified import path,
// using the workspace layout conventions of go/build.
// If no file was found, an empty filename is returned.
//
// A relative srcDir is interpreted relative to the current working directory.
//
// Find also returns the package's resolved (canonical) import path,
// reflecting the effects of srcDir and vendoring on importPath.
func Find(importPath, srcDir string) (filename, path string) {
return gcimporter.FindPkg(importPath, srcDir)
}
// NewReader returns a reader for the export data section of an object
// (.o) or archive (.a) file read from r. The new reader may provide
// additional trailing data beyond the end of the export data.
func NewReader(r io.Reader) (io.Reader, error) {
buf := bufio.NewReader(r)
_, err := gcimporter.FindExportData(buf)
// If we ever switch to a zip-like archive format with the ToC
// at the end, we can return the correct portion of export data,
// but for now we must return the entire rest of the file.
return buf, err
}
// Read reads export data from in, decodes it, and returns type
// information for the package.
// The package name is specified by path.
// File position information is added to fset.
//
// Read may inspect and add to the imports map to ensure that references
// within the export data to other packages are consistent. The caller
// must ensure that imports[path] does not exist, or exists but is
// incomplete (see types.Package.Complete), and Read inserts the
// resulting package into this map entry.
//
// On return, the state of the reader is undefined.
func Read(in io.Reader, fset *token.FileSet, imports map[string]*types.Package, path string) (*types.Package, error) {
data, err := ioutil.ReadAll(in)
if err != nil {
return nil, fmt.Errorf("reading export data for %q: %v", path, err)
}
if bytes.HasPrefix(data, []byte("!<arch>")) {
return nil, fmt.Errorf("can't read export data for %q directly from an archive file (call gcexportdata.NewReader first to extract export data)", path)
}
// The App Engine Go runtime v1.6 uses the old export data format.
// TODO(adonovan): delete once v1.7 has been around for a while.
if bytes.HasPrefix(data, []byte("package ")) {
return gcimporter.ImportData(imports, path, path, bytes.NewReader(data))
}
// The indexed export format starts with an 'i'; the older
// binary export format starts with a 'c', 'd', or 'v'
// (from "version"). Select appropriate importer.
if len(data) > 0 && data[0] == 'i' {
_, pkg, err := gcimporter.IImportData(fset, imports, data[1:], path)
return pkg, err
}
_, pkg, err := gcimporter.BImportData(fset, imports, data, path)
return pkg, err
}
// Write writes encoded type information for the specified package to out.
// The FileSet provides file position information for named objects.
func Write(out io.Writer, fset *token.FileSet, pkg *types.Package) error {
b, err := gcimporter.BExportData(fset, pkg)
if err != nil {
return err
}
_, err = out.Write(b)
return err
}
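// ---- Editor's sketch (not part of the original file) ----
// The Find/NewReader/Read pipeline for one package. Find follows the
// classic GOPATH workspace layout; if no .a file is installed it returns
// an empty filename, so treat this as best-effort.
package main

import (
	"fmt"
	"go/token"
	"go/types"
	"log"
	"os"

	"golang.org/x/tools/go/gcexportdata"
)

func main() {
	filename, path := gcexportdata.Find("fmt", "")
	if filename == "" {
		log.Fatal("no export data found for fmt")
	}
	f, err := os.Open(filename)
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()
	r, err := gcexportdata.NewReader(f)
	if err != nil {
		log.Fatal(err)
	}
	fset := token.NewFileSet()
	imports := make(map[string]*types.Package)
	pkg, err := gcexportdata.Read(r, fset, imports, path)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(pkg.Path(), pkg.Name())
}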

View file

@@ -1,73 +0,0 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gcexportdata
import (
"fmt"
"go/token"
"go/types"
"os"
)
// NewImporter returns a new instance of the types.Importer interface
// that reads type information from export data files written by gc.
// The Importer also satisfies types.ImporterFrom.
//
// Export data files are located using "go build" workspace conventions
// and the build.Default context.
//
// Use this importer instead of go/importer.For("gc", ...) to avoid the
// version-skew problems described in the documentation of this package,
// or to control the FileSet or access the imports map populated during
// package loading.
//
func NewImporter(fset *token.FileSet, imports map[string]*types.Package) types.ImporterFrom {
return importer{fset, imports}
}
type importer struct {
fset *token.FileSet
imports map[string]*types.Package
}
func (imp importer) Import(importPath string) (*types.Package, error) {
return imp.ImportFrom(importPath, "", 0)
}
func (imp importer) ImportFrom(importPath, srcDir string, mode types.ImportMode) (_ *types.Package, err error) {
filename, path := Find(importPath, srcDir)
if filename == "" {
if importPath == "unsafe" {
// Even for unsafe, call Find first in case
// the package was vendored.
return types.Unsafe, nil
}
return nil, fmt.Errorf("can't find import: %s", importPath)
}
if pkg, ok := imp.imports[path]; ok && pkg.Complete() {
return pkg, nil // cache hit
}
// open file
f, err := os.Open(filename)
if err != nil {
return nil, err
}
defer func() {
f.Close()
if err != nil {
// add file name to error
err = fmt.Errorf("reading export data: %s: %v", filename, err)
}
}()
r, err := NewReader(f)
if err != nil {
return nil, err
}
return Read(r, imp.fset, imp.imports, path)
}
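// ---- Editor's sketch (not part of the original file) ----
// Plugging NewImporter into go/types to type-check a snippet; the source
// string is an assumption, and resolving "fmt" still requires export data
// to be findable on disk (see the caveat in the package documentation).
package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"go/types"
	"log"

	"golang.org/x/tools/go/gcexportdata"
)

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "demo.go", `package demo; import "fmt"; var _ = fmt.Sprint`, 0)
	if err != nil {
		log.Fatal(err)
	}
	imports := make(map[string]*types.Package)
	conf := types.Config{Importer: gcexportdata.NewImporter(fset, imports)}
	pkg, err := conf.Check("demo", fset, []*ast.File{f}, nil)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(pkg.Path(), "type-checked OK")
}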

View file

@@ -1,99 +0,0 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build ignore
// The gcexportdata command is a diagnostic tool that displays the
// contents of gc export data files.
package main
import (
"flag"
"fmt"
"go/token"
"go/types"
"log"
"os"
"golang.org/x/tools/go/gcexportdata"
"golang.org/x/tools/go/types/typeutil"
)
var packageFlag = flag.String("package", "", "alternative package to print")
func main() {
log.SetPrefix("gcexportdata: ")
log.SetFlags(0)
flag.Usage = func() {
fmt.Fprintln(os.Stderr, "usage: gcexportdata [-package path] file.a")
}
flag.Parse()
if flag.NArg() != 1 {
flag.Usage()
os.Exit(2)
}
filename := flag.Args()[0]
f, err := os.Open(filename)
if err != nil {
log.Fatal(err)
}
r, err := gcexportdata.NewReader(f)
if err != nil {
log.Fatalf("%s: %s", filename, err)
}
// Decode the package.
const primary = "<primary>"
imports := make(map[string]*types.Package)
fset := token.NewFileSet()
pkg, err := gcexportdata.Read(r, fset, imports, primary)
if err != nil {
log.Fatalf("%s: %s", filename, err)
}
// Optionally select an indirectly mentioned package.
if *packageFlag != "" {
pkg = imports[*packageFlag]
if pkg == nil {
fmt.Fprintf(os.Stderr, "export data file %s does not mention %s; has:\n",
filename, *packageFlag)
for p := range imports {
if p != primary {
fmt.Fprintf(os.Stderr, "\t%s\n", p)
}
}
os.Exit(1)
}
}
// Print all package-level declarations, including non-exported ones.
fmt.Printf("package %s\n", pkg.Name())
for _, imp := range pkg.Imports() {
fmt.Printf("import %q\n", imp.Path())
}
qual := func(p *types.Package) string {
if pkg == p {
return ""
}
return p.Name()
}
scope := pkg.Scope()
for _, name := range scope.Names() {
obj := scope.Lookup(name)
fmt.Printf("%s: %s\n",
fset.Position(obj.Pos()),
types.ObjectString(obj, qual))
// For types, print each method.
if _, ok := obj.(*types.TypeName); ok {
for _, method := range typeutil.IntuitiveMethodSet(obj.Type(), nil) {
fmt.Printf("%s: %s\n",
fset.Position(method.Obj().Pos()),
types.SelectionString(method, qual))
}
}
}
}

View file

@@ -1,220 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cgo
// This file handles cgo preprocessing of files containing `import "C"`.
//
// DESIGN
//
// The approach taken is to run the cgo processor on the package's
// CgoFiles and parse the output, faking the filenames of the
// resulting ASTs so that the synthetic file containing the C types is
// called "C" (e.g. "~/go/src/net/C") and the preprocessed files
// have their original names (e.g. "~/go/src/net/cgo_unix.go"),
// not the names of the actual temporary files.
//
// The advantage of this approach is its fidelity to 'go build'. The
// downside is that the token.Position.Offset for each AST node is
// incorrect, being an offset within the temporary file. Line numbers
// should still be correct because of the //line comments.
//
// The logic of this file is mostly plundered from the 'go build'
// tool, which also invokes the cgo preprocessor.
//
//
// REJECTED ALTERNATIVE
//
// An alternative approach that we explored is to extend go/types'
// Importer mechanism to provide the identity of the importing package
// so that each time `import "C"` appears it resolves to a different
// synthetic package containing just the objects needed in that case.
// The loader would invoke cgo but parse only the cgo_types.go file
// defining the package-level objects, discarding the other files
// resulting from preprocessing.
//
// The benefit of this approach would have been that source-level
// syntax information would correspond exactly to the original cgo
// file, with no preprocessing involved, making source tools like
// godoc, guru, and eg happy. However, the approach was rejected
// due to the additional complexity it would impose on go/types. (It
// made for a beautiful demo, though.)
//
// cgo files, despite their *.go extension, are not legal Go source
// files per the specification since they may refer to unexported
// members of package "C" such as C.int. Also, a function such as
// C.getpwent has in effect two types, one matching its C type and one
// which additionally returns (errno C.int). The cgo preprocessor
// uses name mangling to distinguish these two functions in the
// processed code, but go/types would need to duplicate this logic in
// its handling of function calls, analogous to the treatment of map
// lookups in which y=m[k] and y,ok=m[k] are both legal.
import (
"fmt"
"go/ast"
"go/build"
"go/parser"
"go/token"
"io/ioutil"
"log"
"os"
"os/exec"
"path/filepath"
"regexp"
"strings"
)
// ProcessFiles invokes the cgo preprocessor on bp.CgoFiles, parses
// the output and returns the resulting ASTs.
//
func ProcessFiles(bp *build.Package, fset *token.FileSet, DisplayPath func(path string) string, mode parser.Mode) ([]*ast.File, error) {
tmpdir, err := ioutil.TempDir("", strings.Replace(bp.ImportPath, "/", "_", -1)+"_C")
if err != nil {
return nil, err
}
defer os.RemoveAll(tmpdir)
pkgdir := bp.Dir
if DisplayPath != nil {
pkgdir = DisplayPath(pkgdir)
}
cgoFiles, cgoDisplayFiles, err := Run(bp, pkgdir, tmpdir, false)
if err != nil {
return nil, err
}
var files []*ast.File
for i := range cgoFiles {
rd, err := os.Open(cgoFiles[i])
if err != nil {
return nil, err
}
display := filepath.Join(bp.Dir, cgoDisplayFiles[i])
f, err := parser.ParseFile(fset, display, rd, mode)
rd.Close()
if err != nil {
return nil, err
}
files = append(files, f)
}
return files, nil
}
var cgoRe = regexp.MustCompile(`[/\\:]`)
// Run invokes the cgo preprocessor on bp.CgoFiles and returns two
// lists of files: the resulting processed files (in temporary
// directory tmpdir) and the corresponding names of the unprocessed files.
//
// Run is adapted from (*builder).cgo in
// $GOROOT/src/cmd/go/build.go, but these features are unsupported:
// Objective C, CGOPKGPATH, CGO_FLAGS.
//
// If useabs is set to true, absolute paths of the bp.CgoFiles will be passed in
// to the cgo preprocessor. This in turn will set the //line comments
// referring to those files to use absolute paths. This is needed for
// go/packages using the legacy go list support so it is able to find
// the original files.
func Run(bp *build.Package, pkgdir, tmpdir string, useabs bool) (files, displayFiles []string, err error) {
cgoCPPFLAGS, _, _, _ := cflags(bp, true)
_, cgoexeCFLAGS, _, _ := cflags(bp, false)
if len(bp.CgoPkgConfig) > 0 {
pcCFLAGS, err := pkgConfigFlags(bp)
if err != nil {
return nil, nil, err
}
cgoCPPFLAGS = append(cgoCPPFLAGS, pcCFLAGS...)
}
// Allows including _cgo_export.h from .[ch] files in the package.
cgoCPPFLAGS = append(cgoCPPFLAGS, "-I", tmpdir)
// _cgo_gotypes.go (displayed "C") contains the type definitions.
files = append(files, filepath.Join(tmpdir, "_cgo_gotypes.go"))
displayFiles = append(displayFiles, "C")
for _, fn := range bp.CgoFiles {
// "foo.cgo1.go" (displayed "foo.go") is the processed Go source.
f := cgoRe.ReplaceAllString(fn[:len(fn)-len("go")], "_")
files = append(files, filepath.Join(tmpdir, f+"cgo1.go"))
displayFiles = append(displayFiles, fn)
}
var cgoflags []string
if bp.Goroot && bp.ImportPath == "runtime/cgo" {
cgoflags = append(cgoflags, "-import_runtime_cgo=false")
}
if bp.Goroot && bp.ImportPath == "runtime/race" || bp.ImportPath == "runtime/cgo" {
cgoflags = append(cgoflags, "-import_syscall=false")
}
var cgoFiles []string = bp.CgoFiles
if useabs {
cgoFiles = make([]string, len(bp.CgoFiles))
for i := range cgoFiles {
cgoFiles[i] = filepath.Join(pkgdir, bp.CgoFiles[i])
}
}
args := stringList(
"go", "tool", "cgo", "-objdir", tmpdir, cgoflags, "--",
cgoCPPFLAGS, cgoexeCFLAGS, cgoFiles,
)
if false {
log.Printf("Running cgo for package %q: %s (dir=%s)", bp.ImportPath, args, pkgdir)
}
cmd := exec.Command(args[0], args[1:]...)
cmd.Dir = pkgdir
cmd.Stdout = os.Stderr
cmd.Stderr = os.Stderr
if err := cmd.Run(); err != nil {
return nil, nil, fmt.Errorf("cgo failed: %s: %s", args, err)
}
return files, displayFiles, nil
}
// -- unmodified from 'go build' ---------------------------------------
// Return the flags to use when invoking the C or C++ compilers, or cgo.
func cflags(p *build.Package, def bool) (cppflags, cflags, cxxflags, ldflags []string) {
var defaults string
if def {
defaults = "-g -O2"
}
cppflags = stringList(envList("CGO_CPPFLAGS", ""), p.CgoCPPFLAGS)
cflags = stringList(envList("CGO_CFLAGS", defaults), p.CgoCFLAGS)
cxxflags = stringList(envList("CGO_CXXFLAGS", defaults), p.CgoCXXFLAGS)
ldflags = stringList(envList("CGO_LDFLAGS", defaults), p.CgoLDFLAGS)
return
}
// envList returns the value of the given environment variable broken
// into fields, using the default value when the variable is empty.
func envList(key, def string) []string {
v := os.Getenv(key)
if v == "" {
v = def
}
return strings.Fields(v)
}
// stringList's arguments should be a sequence of string or []string values.
// stringList flattens them into a single []string.
func stringList(args ...interface{}) []string {
var x []string
for _, arg := range args {
switch arg := arg.(type) {
case []string:
x = append(x, arg...)
case string:
x = append(x, arg)
default:
panic("stringList: invalid argument")
}
}
return x
}
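// ---- Editor's sketch (not part of the original file) ----
// This package lives under go/internal/cgo, so it cannot be imported from
// outside x/tools; the helper below only illustrates the intended call
// shape of ProcessFiles. bp is assumed to come from build.Import and to
// have a non-empty CgoFiles list; a nil DisplayPath keeps real paths.
func processPackage(bp *build.Package) ([]*ast.File, error) {
	fset := token.NewFileSet()
	return ProcessFiles(bp, fset, nil, parser.ParseComments)
}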

View file

@@ -1,39 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cgo
import (
"errors"
"fmt"
"go/build"
"os/exec"
"strings"
)
// pkgConfig runs pkg-config with the specified arguments and returns the flags it prints.
func pkgConfig(mode string, pkgs []string) (flags []string, err error) {
cmd := exec.Command("pkg-config", append([]string{mode}, pkgs...)...)
out, err := cmd.CombinedOutput()
if err != nil {
s := fmt.Sprintf("%s failed: %v", strings.Join(cmd.Args, " "), err)
if len(out) > 0 {
s = fmt.Sprintf("%s: %s", s, out)
}
return nil, errors.New(s)
}
if len(out) > 0 {
flags = strings.Fields(string(out))
}
return
}
// pkgConfigFlags calls pkg-config if needed and returns the cflags
// needed to build the package.
func pkgConfigFlags(p *build.Package) (cflags []string, err error) {
if len(p.CgoPkgConfig) == 0 {
return nil, nil
}
return pkgConfig("--cflags", p.CgoPkgConfig)
}

View file

@@ -1,852 +0,0 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Binary package export.
// This file was derived from $GOROOT/src/cmd/compile/internal/gc/bexport.go;
// see that file for specification of the format.
package gcimporter
import (
"bytes"
"encoding/binary"
"fmt"
"go/ast"
"go/constant"
"go/token"
"go/types"
"math"
"math/big"
"sort"
"strings"
)
// If debugFormat is set, each integer and string value is preceded by a marker
// and position information in the encoding. This mechanism permits an importer
// to recognize immediately when it is out of sync. The importer recognizes this
// mode automatically (i.e., it can import export data produced with debugging
// support even if debugFormat is not set at the time of import). This mode will
// lead to massively larger export data (by a factor of 2 to 3) and should only
// be enabled during development and debugging.
//
// NOTE: This flag is the first flag to enable if importing dies because of
// (suspected) format errors, and whenever a change is made to the format.
const debugFormat = false // default: false
// If trace is set, debugging output is printed to standard output.
const trace = false // default: false
// Current export format version. Increase with each format change.
// Note: The latest binary (non-indexed) export format is at version 6.
// This exporter is still at level 4, but it doesn't matter since
// the binary importer can handle older versions just fine.
// 6: package height (CL 105038) -- NOT IMPLEMENTED HERE
// 5: improved position encoding efficiency (issue 20080, CL 41619) -- NOT IMPLEMENTED HERE
// 4: type name objects support type aliases, uses aliasTag
// 3: Go1.8 encoding (same as version 2, aliasTag defined but never used)
// 2: removed unused bool in ODCL export (compiler only)
// 1: header format change (more regular), export package for _ struct fields
// 0: Go1.7 encoding
const exportVersion = 4
// trackAllTypes enables cycle tracking for all types, not just named
// types. The existing compiler invariants assume that unnamed types
// that are not completely set up are not used, or else there are spurious
// errors.
// If disabled, only named types are tracked, possibly leading to slightly
// less efficient encoding in rare cases. It also prevents the export of
// some corner-case type declarations (but those are not handled correctly
// with the textual export format either).
// TODO(gri) enable and remove once issues caused by it are fixed
const trackAllTypes = false
type exporter struct {
fset *token.FileSet
out bytes.Buffer
// object -> index maps, indexed in order of serialization
strIndex map[string]int
pkgIndex map[*types.Package]int
typIndex map[types.Type]int
// position encoding
posInfoFormat bool
prevFile string
prevLine int
// debugging support
written int // bytes written
indent int // for trace
}
// internalError represents an error generated inside this package.
type internalError string
func (e internalError) Error() string { return "gcimporter: " + string(e) }
func internalErrorf(format string, args ...interface{}) error {
return internalError(fmt.Sprintf(format, args...))
}
// BExportData returns binary export data for pkg.
// If no file set is provided, position info will be missing.
func BExportData(fset *token.FileSet, pkg *types.Package) (b []byte, err error) {
defer func() {
if e := recover(); e != nil {
if ierr, ok := e.(internalError); ok {
err = ierr
return
}
// Not an internal error; panic again.
panic(e)
}
}()
p := exporter{
fset: fset,
strIndex: map[string]int{"": 0}, // empty string is mapped to 0
pkgIndex: make(map[*types.Package]int),
typIndex: make(map[types.Type]int),
posInfoFormat: true, // TODO(gri) might become a flag, eventually
}
// write version info
// The version string must start with "version %d" where %d is the version
// number. Additional debugging information may follow after a blank; that
// text is ignored by the importer.
p.rawStringln(fmt.Sprintf("version %d", exportVersion))
var debug string
if debugFormat {
debug = "debug"
}
p.rawStringln(debug) // cannot use p.bool since it's affected by debugFormat; also want to see this clearly
p.bool(trackAllTypes)
p.bool(p.posInfoFormat)
// --- generic export data ---
// populate type map with predeclared "known" types
for index, typ := range predeclared {
p.typIndex[typ] = index
}
if len(p.typIndex) != len(predeclared) {
return nil, internalError("duplicate entries in type map?")
}
// write package data
p.pkg(pkg, true)
if trace {
p.tracef("\n")
}
// write objects
objcount := 0
scope := pkg.Scope()
for _, name := range scope.Names() {
if !ast.IsExported(name) {
continue
}
if trace {
p.tracef("\n")
}
p.obj(scope.Lookup(name))
objcount++
}
// indicate end of list
if trace {
p.tracef("\n")
}
p.tag(endTag)
// for self-verification only (redundant)
p.int(objcount)
if trace {
p.tracef("\n")
}
// --- end of export data ---
return p.out.Bytes(), nil
}
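// Hedged usage sketch (not part of the original file): pkg must be a
// complete, type-checked package; passing a nil *token.FileSet simply drops
// position information from the export data.
func exampleExport(fset *token.FileSet, pkg *types.Package) ([]byte, error) {
	return BExportData(fset, pkg)
}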
func (p *exporter) pkg(pkg *types.Package, emptypath bool) {
if pkg == nil {
panic(internalError("unexpected nil pkg"))
}
// if we saw the package before, write its index (>= 0)
if i, ok := p.pkgIndex[pkg]; ok {
p.index('P', i)
return
}
// otherwise, remember the package, write the package tag (< 0) and package data
if trace {
p.tracef("P%d = { ", len(p.pkgIndex))
defer p.tracef("} ")
}
p.pkgIndex[pkg] = len(p.pkgIndex)
p.tag(packageTag)
p.string(pkg.Name())
if emptypath {
p.string("")
} else {
p.string(pkg.Path())
}
}
func (p *exporter) obj(obj types.Object) {
switch obj := obj.(type) {
case *types.Const:
p.tag(constTag)
p.pos(obj)
p.qualifiedName(obj)
p.typ(obj.Type())
p.value(obj.Val())
case *types.TypeName:
if obj.IsAlias() {
p.tag(aliasTag)
p.pos(obj)
p.qualifiedName(obj)
} else {
p.tag(typeTag)
}
p.typ(obj.Type())
case *types.Var:
p.tag(varTag)
p.pos(obj)
p.qualifiedName(obj)
p.typ(obj.Type())
case *types.Func:
p.tag(funcTag)
p.pos(obj)
p.qualifiedName(obj)
sig := obj.Type().(*types.Signature)
p.paramList(sig.Params(), sig.Variadic())
p.paramList(sig.Results(), false)
default:
panic(internalErrorf("unexpected object %v (%T)", obj, obj))
}
}
func (p *exporter) pos(obj types.Object) {
if !p.posInfoFormat {
return
}
file, line := p.fileLine(obj)
if file == p.prevFile {
// common case: write line delta
// delta == 0 means different file or no line change
delta := line - p.prevLine
p.int(delta)
if delta == 0 {
p.int(-1) // -1 means no file change
}
} else {
// different file
p.int(0)
// Encode filename as length of common prefix with previous
// filename, followed by (possibly empty) suffix. Filenames
// frequently share path prefixes, so this can save a lot
// of space and make export data size less dependent on file
// path length. The suffix is unlikely to be empty because
// file names tend to end in ".go".
n := commonPrefixLen(p.prevFile, file)
p.int(n) // n >= 0
p.string(file[n:]) // write suffix only
p.prevFile = file
p.int(line)
}
p.prevLine = line
}
func (p *exporter) fileLine(obj types.Object) (file string, line int) {
if p.fset != nil {
pos := p.fset.Position(obj.Pos())
file = pos.Filename
line = pos.Line
}
return
}
func commonPrefixLen(a, b string) int {
if len(a) > len(b) {
a, b = b, a
}
// len(a) <= len(b)
i := 0
for i < len(a) && a[i] == b[i] {
i++
}
return i
}
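// Hedged worked example (not part of the original file) of the filename
// encoding described in pos above: only the suffix after the common prefix
// with the previously emitted filename is written out.
func examplePrefixEncoding() (int, string) {
	prev, next := "src/go/types/object.go", "src/go/types/scope.go"
	n := commonPrefixLen(prev, next) // 13, the length of "src/go/types/"
	return n, next[n:]               // "scope.go" is all that gets encoded
}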
func (p *exporter) qualifiedName(obj types.Object) {
p.string(obj.Name())
p.pkg(obj.Pkg(), false)
}
func (p *exporter) typ(t types.Type) {
if t == nil {
panic(internalError("nil type"))
}
// Possible optimization: Anonymous pointer types *T where
// T is a named type are common. We could canonicalize all
// such types *T to a single type PT = *T. This would lead
// to at most one *T entry in typIndex, and all future *T's
// would be encoded as the respective index directly. Would
// save 1 byte (pointerTag) per *T and reduce the typIndex
// size (at the cost of a canonicalization map). We can do
// this later, without encoding format change.
// if we saw the type before, write its index (>= 0)
if i, ok := p.typIndex[t]; ok {
p.index('T', i)
return
}
// otherwise, remember the type, write the type tag (< 0) and type data
if trackAllTypes {
if trace {
p.tracef("T%d = {>\n", len(p.typIndex))
defer p.tracef("<\n} ")
}
p.typIndex[t] = len(p.typIndex)
}
switch t := t.(type) {
case *types.Named:
if !trackAllTypes {
// if we don't track all types, track named types now
p.typIndex[t] = len(p.typIndex)
}
p.tag(namedTag)
p.pos(t.Obj())
p.qualifiedName(t.Obj())
p.typ(t.Underlying())
if !types.IsInterface(t) {
p.assocMethods(t)
}
case *types.Array:
p.tag(arrayTag)
p.int64(t.Len())
p.typ(t.Elem())
case *types.Slice:
p.tag(sliceTag)
p.typ(t.Elem())
case *dddSlice:
p.tag(dddTag)
p.typ(t.elem)
case *types.Struct:
p.tag(structTag)
p.fieldList(t)
case *types.Pointer:
p.tag(pointerTag)
p.typ(t.Elem())
case *types.Signature:
p.tag(signatureTag)
p.paramList(t.Params(), t.Variadic())
p.paramList(t.Results(), false)
case *types.Interface:
p.tag(interfaceTag)
p.iface(t)
case *types.Map:
p.tag(mapTag)
p.typ(t.Key())
p.typ(t.Elem())
case *types.Chan:
p.tag(chanTag)
p.int(int(3 - t.Dir())) // hack
p.typ(t.Elem())
default:
panic(internalErrorf("unexpected type %T: %s", t, t))
}
}
func (p *exporter) assocMethods(named *types.Named) {
// Sort methods (for determinism).
var methods []*types.Func
for i := 0; i < named.NumMethods(); i++ {
methods = append(methods, named.Method(i))
}
sort.Sort(methodsByName(methods))
p.int(len(methods))
if trace && methods != nil {
p.tracef("associated methods {>\n")
}
for i, m := range methods {
if trace && i > 0 {
p.tracef("\n")
}
p.pos(m)
name := m.Name()
p.string(name)
if !exported(name) {
p.pkg(m.Pkg(), false)
}
sig := m.Type().(*types.Signature)
p.paramList(types.NewTuple(sig.Recv()), false)
p.paramList(sig.Params(), sig.Variadic())
p.paramList(sig.Results(), false)
p.int(0) // dummy value for go:nointerface pragma - ignored by importer
}
if trace && methods != nil {
p.tracef("<\n} ")
}
}
type methodsByName []*types.Func
func (x methodsByName) Len() int { return len(x) }
func (x methodsByName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
func (x methodsByName) Less(i, j int) bool { return x[i].Name() < x[j].Name() }
func (p *exporter) fieldList(t *types.Struct) {
if trace && t.NumFields() > 0 {
p.tracef("fields {>\n")
defer p.tracef("<\n} ")
}
p.int(t.NumFields())
for i := 0; i < t.NumFields(); i++ {
if trace && i > 0 {
p.tracef("\n")
}
p.field(t.Field(i))
p.string(t.Tag(i))
}
}
func (p *exporter) field(f *types.Var) {
if !f.IsField() {
panic(internalError("field expected"))
}
p.pos(f)
p.fieldName(f)
p.typ(f.Type())
}
func (p *exporter) iface(t *types.Interface) {
// TODO(gri): enable importer to load embedded interfaces,
// then emit Embeddeds and ExplicitMethods separately here.
p.int(0)
n := t.NumMethods()
if trace && n > 0 {
p.tracef("methods {>\n")
defer p.tracef("<\n} ")
}
p.int(n)
for i := 0; i < n; i++ {
if trace && i > 0 {
p.tracef("\n")
}
p.method(t.Method(i))
}
}
func (p *exporter) method(m *types.Func) {
sig := m.Type().(*types.Signature)
if sig.Recv() == nil {
panic(internalError("method expected"))
}
p.pos(m)
p.string(m.Name())
if m.Name() != "_" && !ast.IsExported(m.Name()) {
p.pkg(m.Pkg(), false)
}
// interface method; no need to encode receiver.
p.paramList(sig.Params(), sig.Variadic())
p.paramList(sig.Results(), false)
}
func (p *exporter) fieldName(f *types.Var) {
name := f.Name()
if f.Anonymous() {
// anonymous field - we distinguish between 3 cases:
// 1) field name matches base type name and is exported
// 2) field name matches base type name and is not exported
// 3) field name doesn't match base type name (alias name)
bname := basetypeName(f.Type())
if name == bname {
if ast.IsExported(name) {
name = "" // 1) we don't need to know the field name or package
} else {
name = "?" // 2) use unexported name "?" to force package export
}
} else {
// 3) indicate alias and export name as is
// (this requires an extra "@" but this is a rare case)
p.string("@")
}
}
p.string(name)
if name != "" && !ast.IsExported(name) {
p.pkg(f.Pkg(), false)
}
}
func basetypeName(typ types.Type) string {
switch typ := deref(typ).(type) {
case *types.Basic:
return typ.Name()
case *types.Named:
return typ.Obj().Name()
default:
return "" // unnamed type
}
}
func (p *exporter) paramList(params *types.Tuple, variadic bool) {
// use negative length to indicate unnamed parameters
// (look at the first parameter only since either all
// names are present or all are absent)
n := params.Len()
if n > 0 && params.At(0).Name() == "" {
n = -n
}
p.int(n)
for i := 0; i < params.Len(); i++ {
q := params.At(i)
t := q.Type()
if variadic && i == params.Len()-1 {
t = &dddSlice{t.(*types.Slice).Elem()}
}
p.typ(t)
if n > 0 {
name := q.Name()
p.string(name)
if name != "_" {
p.pkg(q.Pkg(), false)
}
}
p.string("") // no compiler-specific info
}
}
func (p *exporter) value(x constant.Value) {
if trace {
p.tracef("= ")
}
switch x.Kind() {
case constant.Bool:
tag := falseTag
if constant.BoolVal(x) {
tag = trueTag
}
p.tag(tag)
case constant.Int:
if v, exact := constant.Int64Val(x); exact {
// common case: x fits into an int64 - use compact encoding
p.tag(int64Tag)
p.int64(v)
return
}
// uncommon case: large x - use float encoding
// (powers of 2 will be encoded efficiently with exponent)
p.tag(floatTag)
p.float(constant.ToFloat(x))
case constant.Float:
p.tag(floatTag)
p.float(x)
case constant.Complex:
p.tag(complexTag)
p.float(constant.Real(x))
p.float(constant.Imag(x))
case constant.String:
p.tag(stringTag)
p.string(constant.StringVal(x))
case constant.Unknown:
// package contains type errors
p.tag(unknownTag)
default:
panic(internalErrorf("unexpected value %v (%T)", x, x))
}
}
func (p *exporter) float(x constant.Value) {
if x.Kind() != constant.Float {
panic(internalErrorf("unexpected constant %v, want float", x))
}
// extract sign (there is no -0)
sign := constant.Sign(x)
if sign == 0 {
// x == 0
p.int(0)
return
}
// x != 0
var f big.Float
if v, exact := constant.Float64Val(x); exact {
// float64
f.SetFloat64(v)
} else if num, denom := constant.Num(x), constant.Denom(x); num.Kind() == constant.Int {
// TODO(gri): add big.Rat accessor to constant.Value.
r := valueToRat(num)
f.SetRat(r.Quo(r, valueToRat(denom)))
} else {
// Value too large to represent as a fraction => inaccessible.
// TODO(gri): add big.Float accessor to constant.Value.
f.SetFloat64(math.MaxFloat64) // FIXME
}
// extract exponent such that 0.5 <= m < 1.0
var m big.Float
exp := f.MantExp(&m)
// extract mantissa as *big.Int
// - set exponent large enough so mant satisfies mant.IsInt()
// - get *big.Int from mant
m.SetMantExp(&m, int(m.MinPrec()))
mant, acc := m.Int(nil)
if acc != big.Exact {
panic(internalError("internal error"))
}
p.int(sign)
p.int(exp)
p.string(string(mant.Bytes()))
}
func valueToRat(x constant.Value) *big.Rat {
// Convert little-endian to big-endian.
// I can't believe this is necessary.
bytes := constant.Bytes(x)
for i := 0; i < len(bytes)/2; i++ {
bytes[i], bytes[len(bytes)-1-i] = bytes[len(bytes)-1-i], bytes[i]
}
return new(big.Rat).SetInt(new(big.Int).SetBytes(bytes))
}
func (p *exporter) bool(b bool) bool {
if trace {
p.tracef("[")
defer p.tracef("= %v] ", b)
}
x := 0
if b {
x = 1
}
p.int(x)
return b
}
// ----------------------------------------------------------------------------
// Low-level encoders
func (p *exporter) index(marker byte, index int) {
if index < 0 {
panic(internalError("invalid index < 0"))
}
if debugFormat {
p.marker('t')
}
if trace {
p.tracef("%c%d ", marker, index)
}
p.rawInt64(int64(index))
}
func (p *exporter) tag(tag int) {
if tag >= 0 {
panic(internalError("invalid tag >= 0"))
}
if debugFormat {
p.marker('t')
}
if trace {
p.tracef("%s ", tagString[-tag])
}
p.rawInt64(int64(tag))
}
func (p *exporter) int(x int) {
p.int64(int64(x))
}
func (p *exporter) int64(x int64) {
if debugFormat {
p.marker('i')
}
if trace {
p.tracef("%d ", x)
}
p.rawInt64(x)
}
func (p *exporter) string(s string) {
if debugFormat {
p.marker('s')
}
if trace {
p.tracef("%q ", s)
}
// if we saw the string before, write its index (>= 0)
// (the empty string is mapped to 0)
if i, ok := p.strIndex[s]; ok {
p.rawInt64(int64(i))
return
}
// otherwise, remember string and write its negative length and bytes
p.strIndex[s] = len(p.strIndex)
p.rawInt64(-int64(len(s)))
for i := 0; i < len(s); i++ {
p.rawByte(s[i])
}
}
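// Hedged illustration (not part of the original file): strings are interned.
// The first occurrence is written as a negative length followed by the raw
// bytes; later occurrences write only the index assigned on first use.
func exampleStringInterning() {
	p := &exporter{strIndex: map[string]int{"": 0}}
	p.string("abc") // emits -3, then 'a' 'b' 'c'; "abc" is assigned index 1
	p.string("abc") // emits just the index 1
}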
// marker emits a marker byte and position information which makes
// it easy for a reader to detect if it is "out of sync". Used for
// debugFormat format only.
func (p *exporter) marker(m byte) {
p.rawByte(m)
// Enable this for help tracking down the location
// of an incorrect marker when running in debugFormat.
if false && trace {
p.tracef("#%d ", p.written)
}
p.rawInt64(int64(p.written))
}
// rawInt64 should only be used by low-level encoders.
func (p *exporter) rawInt64(x int64) {
var tmp [binary.MaxVarintLen64]byte
n := binary.PutVarint(tmp[:], x)
for i := 0; i < n; i++ {
p.rawByte(tmp[i])
}
}
// rawStringln should only be used to emit the initial version string.
func (p *exporter) rawStringln(s string) {
for i := 0; i < len(s); i++ {
p.rawByte(s[i])
}
p.rawByte('\n')
}
// rawByte is the bottleneck interface to write to p.out.
// rawByte escapes b as follows (any encoding that
// hides '$' works):
//
// '$' => '|' 'S'
// '|' => '|' '|'
//
// Necessary so other tools can find the end of the
// export data by searching for "$$".
// rawByte should only be used by low-level encoders.
func (p *exporter) rawByte(b byte) {
switch b {
case '$':
// write '$' as '|' 'S'
b = 'S'
fallthrough
case '|':
// write '|' as '|' '|'
p.out.WriteByte('|')
p.written++
}
p.out.WriteByte(b)
p.written++
}
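// Hedged example (not part of the original file): the escaping above
// guarantees the two-byte sequence "$$" never occurs inside the export
// data, so readers can scan for "$$" to find where it ends.
func exampleEscape() string {
	var p exporter
	for _, b := range []byte("a$|") {
		p.rawByte(b)
	}
	return p.out.String() // "a|S||": '$' became "|S" and '|' became "||"
}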
// tracef is like fmt.Printf but it rewrites the format string
// to take care of indentation.
func (p *exporter) tracef(format string, args ...interface{}) {
if strings.ContainsAny(format, "<>\n") {
var buf bytes.Buffer
for i := 0; i < len(format); i++ {
// no need to deal with runes
ch := format[i]
switch ch {
case '>':
p.indent++
continue
case '<':
p.indent--
continue
}
buf.WriteByte(ch)
if ch == '\n' {
for j := p.indent; j > 0; j-- {
buf.WriteString(". ")
}
}
}
format = buf.String()
}
fmt.Printf(format, args...)
}
// Debugging support.
// (tagString is only used when tracing is enabled)
var tagString = [...]string{
// Packages
-packageTag: "package",
// Types
-namedTag: "named type",
-arrayTag: "array",
-sliceTag: "slice",
-dddTag: "ddd",
-structTag: "struct",
-pointerTag: "pointer",
-signatureTag: "signature",
-interfaceTag: "interface",
-mapTag: "map",
-chanTag: "chan",
// Values
-falseTag: "false",
-trueTag: "true",
-int64Tag: "int64",
-floatTag: "float",
-fractionTag: "fraction",
-complexTag: "complex",
-stringTag: "string",
-unknownTag: "unknown",
// Type aliases
-aliasTag: "alias",
}

File diff suppressed because it is too large


@ -1,93 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This file is a copy of $GOROOT/src/go/internal/gcimporter/exportdata.go.
// This file implements FindExportData.
package gcimporter
import (
"bufio"
"fmt"
"io"
"strconv"
"strings"
)
func readGopackHeader(r *bufio.Reader) (name string, size int, err error) {
// See $GOROOT/include/ar.h.
hdr := make([]byte, 16+12+6+6+8+10+2)
_, err = io.ReadFull(r, hdr)
if err != nil {
return
}
// leave for debugging
if false {
fmt.Printf("header: %s", hdr)
}
s := strings.TrimSpace(string(hdr[16+12+6+6+8:][:10]))
size, err = strconv.Atoi(s)
if err != nil || hdr[len(hdr)-2] != '`' || hdr[len(hdr)-1] != '\n' {
err = fmt.Errorf("invalid archive header")
return
}
name = strings.TrimSpace(string(hdr[:16]))
return
}
// FindExportData positions the reader r at the beginning of the
// export data section of an underlying GC-created object/archive
// file by reading from it. The reader must be positioned at the
// start of the file before calling this function. The hdr result
// is the string before the export data, either "$$" or "$$B".
//
func FindExportData(r *bufio.Reader) (hdr string, err error) {
// Read first line to make sure this is an object file.
line, err := r.ReadSlice('\n')
if err != nil {
err = fmt.Errorf("can't find export data (%v)", err)
return
}
if string(line) == "!<arch>\n" {
// Archive file. Scan to __.PKGDEF.
var name string
if name, _, err = readGopackHeader(r); err != nil {
return
}
// First entry should be __.PKGDEF.
if name != "__.PKGDEF" {
err = fmt.Errorf("go archive is missing __.PKGDEF")
return
}
// Read first line of __.PKGDEF data, so that line
// is once again the first line of the input.
if line, err = r.ReadSlice('\n'); err != nil {
err = fmt.Errorf("can't find export data (%v)", err)
return
}
}
// Now at __.PKGDEF in archive or still at beginning of file.
// Either way, line should begin with "go object ".
if !strings.HasPrefix(string(line), "go object ") {
err = fmt.Errorf("not a Go object file")
return
}
// Skip over object header to export data.
// Begins after first line starting with $$.
for line[0] != '$' {
if line, err = r.ReadSlice('\n'); err != nil {
err = fmt.Errorf("can't find export data (%v)", err)
return
}
}
hdr = string(line)
return
}
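// Hedged sketch (not part of the original file): FindExportData works on any
// positioned bufio.Reader, not only files. The object header below is
// illustrative; real headers are written by the Go toolchain.
func exampleFindExportData() (string, error) {
	obj := "go object linux amd64 go1.11\nmore header lines\n$$B\n<export data>$$\n"
	return FindExportData(bufio.NewReader(strings.NewReader(obj))) // hdr: "$$B\n"
}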

File diff suppressed because it is too large


@ -1,598 +0,0 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Indexed package import.
// See cmd/compile/internal/gc/iexport.go for the export data format.
// This file is a copy of $GOROOT/src/go/internal/gcimporter/iimport.go.
package gcimporter
import (
"bytes"
"encoding/binary"
"fmt"
"go/constant"
"go/token"
"go/types"
"io"
"sort"
)
type intReader struct {
*bytes.Reader
path string
}
func (r *intReader) int64() int64 {
i, err := binary.ReadVarint(r.Reader)
if err != nil {
errorf("import %q: read varint error: %v", r.path, err)
}
return i
}
func (r *intReader) uint64() uint64 {
i, err := binary.ReadUvarint(r.Reader)
if err != nil {
errorf("import %q: read varint error: %v", r.path, err)
}
return i
}
const predeclReserved = 32
type itag uint64
const (
// Types
definedType itag = iota
pointerType
sliceType
arrayType
chanType
mapType
signatureType
structType
interfaceType
)
// IImportData imports a package from the serialized package data
// and returns the number of bytes consumed and a reference to the package.
// If the export data version is not recognized or the format is otherwise
// compromised, an error is returned.
func IImportData(fset *token.FileSet, imports map[string]*types.Package, data []byte, path string) (_ int, pkg *types.Package, err error) {
const currentVersion = 0
version := -1
defer func() {
if e := recover(); e != nil {
if version > currentVersion {
err = fmt.Errorf("cannot import %q (%v), export data is newer version - update tool", path, e)
} else {
err = fmt.Errorf("cannot import %q (%v), possibly version skew - reinstall package", path, e)
}
}
}()
r := &intReader{bytes.NewReader(data), path}
version = int(r.uint64())
switch version {
case currentVersion:
default:
errorf("unknown iexport format version %d", version)
}
sLen := int64(r.uint64())
dLen := int64(r.uint64())
whence, _ := r.Seek(0, io.SeekCurrent)
stringData := data[whence : whence+sLen]
declData := data[whence+sLen : whence+sLen+dLen]
r.Seek(sLen+dLen, io.SeekCurrent)
p := iimporter{
ipath: path,
stringData: stringData,
stringCache: make(map[uint64]string),
pkgCache: make(map[uint64]*types.Package),
declData: declData,
pkgIndex: make(map[*types.Package]map[string]uint64),
typCache: make(map[uint64]types.Type),
fake: fakeFileSet{
fset: fset,
files: make(map[string]*token.File),
},
}
for i, pt := range predeclared {
p.typCache[uint64(i)] = pt
}
pkgList := make([]*types.Package, r.uint64())
for i := range pkgList {
pkgPathOff := r.uint64()
pkgPath := p.stringAt(pkgPathOff)
pkgName := p.stringAt(r.uint64())
_ = r.uint64() // package height; unused by go/types
if pkgPath == "" {
pkgPath = path
}
pkg := imports[pkgPath]
if pkg == nil {
pkg = types.NewPackage(pkgPath, pkgName)
imports[pkgPath] = pkg
} else if pkg.Name() != pkgName {
errorf("conflicting names %s and %s for package %q", pkg.Name(), pkgName, path)
}
p.pkgCache[pkgPathOff] = pkg
nameIndex := make(map[string]uint64)
for nSyms := r.uint64(); nSyms > 0; nSyms-- {
name := p.stringAt(r.uint64())
nameIndex[name] = r.uint64()
}
p.pkgIndex[pkg] = nameIndex
pkgList[i] = pkg
}
localpkg := pkgList[0]
names := make([]string, 0, len(p.pkgIndex[localpkg]))
for name := range p.pkgIndex[localpkg] {
names = append(names, name)
}
sort.Strings(names)
for _, name := range names {
p.doDecl(localpkg, name)
}
for _, typ := range p.interfaceList {
typ.Complete()
}
// record all referenced packages as imports
list := append(([]*types.Package)(nil), pkgList[1:]...)
sort.Sort(byPath(list))
localpkg.SetImports(list)
// package was imported completely and without errors
localpkg.MarkComplete()
consumed, _ := r.Seek(0, io.SeekCurrent)
return int(consumed), localpkg, nil
}
type iimporter struct {
ipath string
stringData []byte
stringCache map[uint64]string
pkgCache map[uint64]*types.Package
declData []byte
pkgIndex map[*types.Package]map[string]uint64
typCache map[uint64]types.Type
fake fakeFileSet
interfaceList []*types.Interface
}
func (p *iimporter) doDecl(pkg *types.Package, name string) {
// See if we've already imported this declaration.
if obj := pkg.Scope().Lookup(name); obj != nil {
return
}
off, ok := p.pkgIndex[pkg][name]
if !ok {
errorf("%v.%v not in index", pkg, name)
}
r := &importReader{p: p, currPkg: pkg}
r.declReader.Reset(p.declData[off:])
r.obj(name)
}
func (p *iimporter) stringAt(off uint64) string {
if s, ok := p.stringCache[off]; ok {
return s
}
slen, n := binary.Uvarint(p.stringData[off:])
if n <= 0 {
errorf("varint failed")
}
spos := off + uint64(n)
s := string(p.stringData[spos : spos+slen])
p.stringCache[off] = s
return s
}
func (p *iimporter) pkgAt(off uint64) *types.Package {
if pkg, ok := p.pkgCache[off]; ok {
return pkg
}
path := p.stringAt(off)
errorf("missing package %q in %q", path, p.ipath)
return nil
}
func (p *iimporter) typAt(off uint64, base *types.Named) types.Type {
if t, ok := p.typCache[off]; ok && (base == nil || !isInterface(t)) {
return t
}
if off < predeclReserved {
errorf("predeclared type missing from cache: %v", off)
}
r := &importReader{p: p}
r.declReader.Reset(p.declData[off-predeclReserved:])
t := r.doType(base)
if base == nil || !isInterface(t) {
p.typCache[off] = t
}
return t
}
type importReader struct {
p *iimporter
declReader bytes.Reader
currPkg *types.Package
prevFile string
prevLine int64
}
func (r *importReader) obj(name string) {
tag := r.byte()
pos := r.pos()
switch tag {
case 'A':
typ := r.typ()
r.declare(types.NewTypeName(pos, r.currPkg, name, typ))
case 'C':
typ, val := r.value()
r.declare(types.NewConst(pos, r.currPkg, name, typ, val))
case 'F':
sig := r.signature(nil)
r.declare(types.NewFunc(pos, r.currPkg, name, sig))
case 'T':
// Types can be recursive. We need to set up a stub
// declaration before recursing.
obj := types.NewTypeName(pos, r.currPkg, name, nil)
named := types.NewNamed(obj, nil, nil)
r.declare(obj)
underlying := r.p.typAt(r.uint64(), named).Underlying()
named.SetUnderlying(underlying)
if !isInterface(underlying) {
for n := r.uint64(); n > 0; n-- {
mpos := r.pos()
mname := r.ident()
recv := r.param()
msig := r.signature(recv)
named.AddMethod(types.NewFunc(mpos, r.currPkg, mname, msig))
}
}
case 'V':
typ := r.typ()
r.declare(types.NewVar(pos, r.currPkg, name, typ))
default:
errorf("unexpected tag: %v", tag)
}
}
func (r *importReader) declare(obj types.Object) {
obj.Pkg().Scope().Insert(obj)
}
func (r *importReader) value() (typ types.Type, val constant.Value) {
typ = r.typ()
switch b := typ.Underlying().(*types.Basic); b.Info() & types.IsConstType {
case types.IsBoolean:
val = constant.MakeBool(r.bool())
case types.IsString:
val = constant.MakeString(r.string())
case types.IsInteger:
val = r.mpint(b)
case types.IsFloat:
val = r.mpfloat(b)
case types.IsComplex:
re := r.mpfloat(b)
im := r.mpfloat(b)
val = constant.BinaryOp(re, token.ADD, constant.MakeImag(im))
default:
errorf("unexpected type %v", typ) // panics
panic("unreachable")
}
return
}
func intSize(b *types.Basic) (signed bool, maxBytes uint) {
if (b.Info() & types.IsUntyped) != 0 {
return true, 64
}
switch b.Kind() {
case types.Float32, types.Complex64:
return true, 3
case types.Float64, types.Complex128:
return true, 7
}
signed = (b.Info() & types.IsUnsigned) == 0
switch b.Kind() {
case types.Int8, types.Uint8:
maxBytes = 1
case types.Int16, types.Uint16:
maxBytes = 2
case types.Int32, types.Uint32:
maxBytes = 4
default:
maxBytes = 8
}
return
}
func (r *importReader) mpint(b *types.Basic) constant.Value {
signed, maxBytes := intSize(b)
maxSmall := 256 - maxBytes
if signed {
maxSmall = 256 - 2*maxBytes
}
if maxBytes == 1 {
maxSmall = 256
}
n, _ := r.declReader.ReadByte()
if uint(n) < maxSmall {
v := int64(n)
if signed {
v >>= 1
if n&1 != 0 {
v = ^v
}
}
return constant.MakeInt64(v)
}
v := -n
if signed {
v = -(n &^ 1) >> 1
}
if v < 1 || uint(v) > maxBytes {
errorf("weird decoding: %v, %v => %v", n, signed, v)
}
buf := make([]byte, v)
io.ReadFull(&r.declReader, buf)
// convert to little endian
// TODO(gri) go/constant should have a more direct conversion function
// (e.g., once it supports a big.Float based implementation)
for i, j := 0, len(buf)-1; i < j; i, j = i+1, j-1 {
buf[i], buf[j] = buf[j], buf[i]
}
x := constant.MakeFromBytes(buf)
if signed && n&1 != 0 {
x = constant.UnaryOp(token.SUB, x, 0)
}
return x
}
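// Hedged worked example (not part of the original file) of the one-byte fast
// path in mpint above: signed small values use a zig-zag scheme, so byte n
// decodes as n>>1, bit-complemented when n is odd.
func exampleSmallInt(n byte) int64 {
	v := int64(n) >> 1
	if n&1 != 0 {
		v = ^v // 0 -> 0, 1 -> -1, 2 -> 1, 3 -> -2, ...
	}
	return v
}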
func (r *importReader) mpfloat(b *types.Basic) constant.Value {
x := r.mpint(b)
if constant.Sign(x) == 0 {
return x
}
exp := r.int64()
switch {
case exp > 0:
x = constant.Shift(x, token.SHL, uint(exp))
case exp < 0:
d := constant.Shift(constant.MakeInt64(1), token.SHL, uint(-exp))
x = constant.BinaryOp(x, token.QUO, d)
}
return x
}
func (r *importReader) ident() string {
return r.string()
}
func (r *importReader) qualifiedIdent() (*types.Package, string) {
name := r.string()
pkg := r.pkg()
return pkg, name
}
func (r *importReader) pos() token.Pos {
delta := r.int64()
if delta != deltaNewFile {
r.prevLine += delta
} else if l := r.int64(); l == -1 {
r.prevLine += deltaNewFile
} else {
r.prevFile = r.string()
r.prevLine = l
}
if r.prevFile == "" && r.prevLine == 0 {
return token.NoPos
}
return r.p.fake.pos(r.prevFile, int(r.prevLine))
}
func (r *importReader) typ() types.Type {
return r.p.typAt(r.uint64(), nil)
}
func isInterface(t types.Type) bool {
_, ok := t.(*types.Interface)
return ok
}
func (r *importReader) pkg() *types.Package { return r.p.pkgAt(r.uint64()) }
func (r *importReader) string() string { return r.p.stringAt(r.uint64()) }
func (r *importReader) doType(base *types.Named) types.Type {
switch k := r.kind(); k {
default:
errorf("unexpected kind tag in %q: %v", r.p.ipath, k)
return nil
case definedType:
pkg, name := r.qualifiedIdent()
r.p.doDecl(pkg, name)
return pkg.Scope().Lookup(name).(*types.TypeName).Type()
case pointerType:
return types.NewPointer(r.typ())
case sliceType:
return types.NewSlice(r.typ())
case arrayType:
n := r.uint64()
return types.NewArray(r.typ(), int64(n))
case chanType:
dir := chanDir(int(r.uint64()))
return types.NewChan(dir, r.typ())
case mapType:
return types.NewMap(r.typ(), r.typ())
case signatureType:
r.currPkg = r.pkg()
return r.signature(nil)
case structType:
r.currPkg = r.pkg()
fields := make([]*types.Var, r.uint64())
tags := make([]string, len(fields))
for i := range fields {
fpos := r.pos()
fname := r.ident()
ftyp := r.typ()
emb := r.bool()
tag := r.string()
fields[i] = types.NewField(fpos, r.currPkg, fname, ftyp, emb)
tags[i] = tag
}
return types.NewStruct(fields, tags)
case interfaceType:
r.currPkg = r.pkg()
embeddeds := make([]types.Type, r.uint64())
for i := range embeddeds {
_ = r.pos()
embeddeds[i] = r.typ()
}
methods := make([]*types.Func, r.uint64())
for i := range methods {
mpos := r.pos()
mname := r.ident()
// TODO(mdempsky): Matches bimport.go, but I
// don't agree with this.
var recv *types.Var
if base != nil {
recv = types.NewVar(token.NoPos, r.currPkg, "", base)
}
msig := r.signature(recv)
methods[i] = types.NewFunc(mpos, r.currPkg, mname, msig)
}
typ := newInterface(methods, embeddeds)
r.p.interfaceList = append(r.p.interfaceList, typ)
return typ
}
}
func (r *importReader) kind() itag {
return itag(r.uint64())
}
func (r *importReader) signature(recv *types.Var) *types.Signature {
params := r.paramList()
results := r.paramList()
variadic := params.Len() > 0 && r.bool()
return types.NewSignature(recv, params, results, variadic)
}
func (r *importReader) paramList() *types.Tuple {
xs := make([]*types.Var, r.uint64())
for i := range xs {
xs[i] = r.param()
}
return types.NewTuple(xs...)
}
func (r *importReader) param() *types.Var {
pos := r.pos()
name := r.ident()
typ := r.typ()
return types.NewParam(pos, r.currPkg, name, typ)
}
func (r *importReader) bool() bool {
return r.uint64() != 0
}
func (r *importReader) int64() int64 {
n, err := binary.ReadVarint(&r.declReader)
if err != nil {
errorf("readVarint: %v", err)
}
return n
}
func (r *importReader) uint64() uint64 {
n, err := binary.ReadUvarint(&r.declReader)
if err != nil {
errorf("readUvarint: %v", err)
}
return n
}
func (r *importReader) byte() byte {
x, err := r.declReader.ReadByte()
if err != nil {
errorf("declReader.ReadByte: %v", err)
}
return x
}


@ -1,21 +0,0 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !go1.11
package gcimporter
import "go/types"
func newInterface(methods []*types.Func, embeddeds []types.Type) *types.Interface {
named := make([]*types.Named, len(embeddeds))
for i, e := range embeddeds {
var ok bool
named[i], ok = e.(*types.Named)
if !ok {
panic("embedding of non-defined interfaces in interfaces is not supported before Go 1.11")
}
}
return types.NewInterface(methods, named)
}


@ -1,13 +0,0 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build go1.11
package gcimporter
import "go/types"
func newInterface(methods []*types.Func, embeddeds []types.Type) *types.Interface {
return types.NewInterfaceType(methods, embeddeds)
}


@ -1,205 +0,0 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package loader loads a complete Go program from source code, parsing
// and type-checking the initial packages plus their transitive closure
// of dependencies. The ASTs and the derived facts are retained for
// later use.
//
// THIS INTERFACE IS EXPERIMENTAL AND IS LIKELY TO CHANGE.
//
// The package defines two primary types: Config, which specifies a
// set of initial packages to load and various other options; and
// Program, which is the result of successfully loading the packages
// specified by a configuration.
//
// The configuration can be set directly, but *Config provides various
// convenience methods to simplify the common cases, each of which can
// be called any number of times. Finally, these are followed by a
// call to Load() to actually load and type-check the program.
//
// var conf loader.Config
//
// // Use the command-line arguments to specify
// // a set of initial packages to load from source.
// // See FromArgsUsage for help.
// rest, err := conf.FromArgs(os.Args[1:], wantTests)
//
// // Parse the specified files and create an ad hoc package with path "foo".
// // All files must have the same 'package' declaration.
// conf.CreateFromFilenames("foo", "foo.go", "bar.go")
//
// // Create an ad hoc package with path "foo" from
// // the specified already-parsed files.
// // All ASTs must have the same 'package' declaration.
// conf.CreateFromFiles("foo", parsedFiles)
//
// // Add "runtime" to the set of packages to be loaded.
// conf.Import("runtime")
//
// // Adds "fmt" and "fmt_test" to the set of packages
// // to be loaded. "fmt" will include *_test.go files.
// conf.ImportWithTests("fmt")
//
// // Finally, load all the packages specified by the configuration.
// prog, err := conf.Load()
//
// See examples_test.go for examples of API usage.
//
//
// CONCEPTS AND TERMINOLOGY
//
// The WORKSPACE is the set of packages accessible to the loader. The
// workspace is defined by Config.Build, a *build.Context. The
// default context treats subdirectories of $GOROOT and $GOPATH as
// packages, but this behavior may be overridden.
//
// An AD HOC package is one specified as a set of source files on the
// command line. In the simplest case, it may consist of a single file
// such as $GOROOT/src/net/http/triv.go.
//
// EXTERNAL TEST packages are those comprised of a set of *_test.go
// files all with the same 'package foo_test' declaration, all in the
// same directory. (go/build.Package calls these files XTestFiles.)
//
// An IMPORTABLE package is one that can be referred to by some import
// spec. Every importable package is uniquely identified by its
// PACKAGE PATH or just PATH, a string such as "fmt", "encoding/json",
// or "cmd/vendor/golang.org/x/arch/x86/x86asm". A package path
// typically denotes a subdirectory of the workspace.
//
// An import declaration uses an IMPORT PATH to refer to a package.
// Most import declarations use the package path as the import path.
//
// Due to VENDORING (https://golang.org/s/go15vendor), the
// interpretation of an import path may depend on the directory in which
// it appears. To resolve an import path to a package path, go/build
// must search the enclosing directories for a subdirectory named
// "vendor".
//
// Ad hoc packages and external test packages are NON-IMPORTABLE. The
// path of an ad hoc package is inferred from the package
// declarations of its files and is therefore not a unique package key.
// For example, Config.CreatePkgs may specify two initial ad hoc
// packages, both with path "main".
//
// An AUGMENTED package is an importable package P plus all the
// *_test.go files with same 'package foo' declaration as P.
// (go/build.Package calls these files TestFiles.)
//
// The INITIAL packages are those specified in the configuration. A
// DEPENDENCY is a package loaded to satisfy an import in an initial
// package or another dependency.
//
package loader
// IMPLEMENTATION NOTES
//
// 'go test', in-package test files, and import cycles
// ---------------------------------------------------
//
// An external test package may depend upon members of the augmented
// package that are not in the unaugmented package, such as functions
// that expose internals. (See bufio/export_test.go for an example.)
// So, the loader must ensure that for each external test package
// it loads, it also augments the corresponding non-test package.
//
// The import graph over n unaugmented packages must be acyclic; the
// import graph over n-1 unaugmented packages plus one augmented
// package must also be acyclic. ('go test' relies on this.) But the
// import graph over n augmented packages may contain cycles.
//
// First, all the (unaugmented) non-test packages and their
// dependencies are imported in the usual way; the loader reports an
// error if it detects an import cycle.
//
// Then, each package P for which testing is desired is augmented by
// the list P' of its in-package test files, by calling
// (*types.Checker).Files. This arrangement ensures that P' may
// reference definitions within P, but P may not reference definitions
// within P'. Furthermore, P' may import any other package, including
// ones that depend upon P, without an import cycle error.
//
// Consider two packages A and B, both of which have lists of
// in-package test files we'll call A' and B', and which have the
// following import graph edges:
// B imports A
// B' imports A
// A' imports B
// This last edge would be expected to create an error were it not
// for the special type-checking discipline above.
// Cycles of size greater than two are possible. For example:
// compress/bzip2/bzip2_test.go (package bzip2) imports "io/ioutil"
// io/ioutil/tempfile_test.go (package ioutil) imports "regexp"
// regexp/exec_test.go (package regexp) imports "compress/bzip2"
//
//
// Concurrency
// -----------
//
// Let us define the import dependency graph as follows. Each node is a
// list of files passed to (Checker).Files at once. Many of these lists
// are the production code of an importable Go package, so those nodes
// are labelled by the package's path. The remaining nodes are
// ad hoc packages and lists of in-package *_test.go files that augment
// an importable package; those nodes have no label.
//
// The edges of the graph represent import statements appearing within a
// file. An edge connects a node (a list of files) to the node it
// imports, which is importable and thus always labelled.
//
// Loading is controlled by this dependency graph.
//
// To reduce I/O latency, we start loading a package's dependencies
// asynchronously as soon as we've parsed its files and enumerated its
// imports (scanImports). This performs a preorder traversal of the
// import dependency graph.
//
// To exploit hardware parallelism, we type-check unrelated packages in
// parallel, where "unrelated" means not ordered by the partial order of
// the import dependency graph.
//
// We use a concurrency-safe non-blocking cache (importer.imported) to
// record the results of type-checking, whether success or failure. An
// entry is created in this cache by startLoad the first time the
// package is imported. The first goroutine to request an entry becomes
// responsible for completing the task and broadcasting completion to
// subsequent requestors, which block until then.
//
// Type checking occurs in (parallel) postorder: we cannot type-check a
// set of files until we have loaded and type-checked all of their
// immediate dependencies (and thus all of their transitive
// dependencies). If the input were guaranteed free of import cycles,
// this would be trivial: we could simply wait for completion of the
// dependencies and then invoke the typechecker.
//
// But as we saw in the 'go test' section above, some cycles in the
// import graph over packages are actually legal, so long as the
// cycle-forming edge originates in the in-package test files that
// augment the package. This explains why the nodes of the import
// dependency graph are not packages, but lists of files: the unlabelled
// nodes avoid the cycles. Consider packages A and B where B imports A
// and A's in-package tests AT import B. The naively constructed import
// graph over packages would contain a cycle (A+AT) --> B --> (A+AT) but
// the graph over lists of files is AT --> B --> A, where AT is an
// unlabelled node.
//
// Awaiting completion of the dependencies in a cyclic graph would
// deadlock, so we must materialize the import dependency graph (as
// importer.graph) and check whether each import edge forms a cycle. If
// x imports y, and the graph already contains a path from y to x, then
// there is an import cycle, in which case the processing of x must not
// wait for the completion of processing of y.
//
// When the type-checker makes a callback (doImport) to the loader for a
// given import edge, there are two possible cases. In the normal case,
// the dependency has already been completely type-checked; doImport
// does a cache lookup and returns it. In the cyclic case, the entry in
// the cache is still necessarily incomplete, indicating a cycle. We
// perform the cycle check again to obtain the error message, and return
// the error.
//
// The result of using concurrency is about a 2.5x speedup for stdlib_test.
// TODO(adonovan): overhaul the package documentation.

File diff suppressed because it is too large


@ -1,124 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package loader
import (
"go/ast"
"go/build"
"go/parser"
"go/token"
"io"
"os"
"strconv"
"sync"
"golang.org/x/tools/go/buildutil"
)
// We use a counting semaphore to limit
// the number of parallel I/O calls per process.
var ioLimit = make(chan bool, 10)
// parseFiles parses the Go source files within directory dir and
// returns the ASTs of the ones that could be at least partially parsed,
// along with a list of I/O and parse errors encountered.
//
// I/O is done via ctxt, which may specify a virtual file system.
// displayPath is used to transform the filenames attached to the ASTs.
//
func parseFiles(fset *token.FileSet, ctxt *build.Context, displayPath func(string) string, dir string, files []string, mode parser.Mode) ([]*ast.File, []error) {
if displayPath == nil {
displayPath = func(path string) string { return path }
}
var wg sync.WaitGroup
n := len(files)
parsed := make([]*ast.File, n)
errors := make([]error, n)
for i, file := range files {
if !buildutil.IsAbsPath(ctxt, file) {
file = buildutil.JoinPath(ctxt, dir, file)
}
wg.Add(1)
go func(i int, file string) {
ioLimit <- true // wait
defer func() {
wg.Done()
<-ioLimit // signal
}()
var rd io.ReadCloser
var err error
if ctxt.OpenFile != nil {
rd, err = ctxt.OpenFile(file)
} else {
rd, err = os.Open(file)
}
if err != nil {
errors[i] = err // open failed
return
}
// ParseFile may return both an AST and an error.
parsed[i], errors[i] = parser.ParseFile(fset, displayPath(file), rd, mode)
rd.Close()
}(i, file)
}
wg.Wait()
// Eliminate nils, preserving order.
var o int
for _, f := range parsed {
if f != nil {
parsed[o] = f
o++
}
}
parsed = parsed[:o]
o = 0
for _, err := range errors {
if err != nil {
errors[o] = err
o++
}
}
errors = errors[:o]
return parsed, errors
}
// scanImports returns the set of all import paths from all
// import specs in the specified files.
func scanImports(files []*ast.File) map[string]bool {
imports := make(map[string]bool)
for _, f := range files {
for _, decl := range f.Decls {
if decl, ok := decl.(*ast.GenDecl); ok && decl.Tok == token.IMPORT {
for _, spec := range decl.Specs {
spec := spec.(*ast.ImportSpec)
// NB: do not assume the program is well-formed!
path, err := strconv.Unquote(spec.Path.Value)
if err != nil {
continue // quietly ignore the error
}
if path == "C" {
continue // skip pseudopackage
}
imports[path] = true
}
}
}
}
return imports
}
// ---------- Internal helpers ----------
// TODO(adonovan): make this a method: func (*token.File) Contains(token.Pos)
func tokenFileContainsPos(f *token.File, pos token.Pos) bool {
p := int(pos)
base := f.Base()
return base <= p && p < base+f.Size()
}
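// Hedged example (not part of the original file): a position belongs to a
// file exactly when it falls in the half-open interval [Base, Base+Size).
func exampleContains(f *token.File) bool {
	return tokenFileContainsPos(f, token.Pos(f.Base())) // true for any non-empty file
}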


@ -1,241 +0,0 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
/*
Package packages loads Go packages for inspection and analysis.
Note: Though this package is ready for widespread use, we may make minor
breaking changes if absolutely necessary. Any such change will be
announced on golang-tools@ at least one week before it is committed. No
more breaking changes will be made after December 1, 2018.
The Load function takes as input a list of patterns and returns a list of Package
structs describing individual packages matched by those patterns.
The LoadMode controls the amount of detail in the loaded packages.
Load passes most patterns directly to the underlying build tool,
but all patterns with the prefix "query=", where query is a
non-empty string of letters from [a-z], are reserved and may be
interpreted as query operators.
Only two query operators are currently supported, "file" and "pattern".
The query "file=path/to/file.go" matches the package or packages enclosing
the Go source file path/to/file.go. For example "file=~/go/src/fmt/print.go"
might return the packages "fmt" and "fmt [fmt.test]".
The query "pattern=string" causes "string" to be passed directly to
the underlying build tool. In most cases this is unnecessary,
but an application can use Load("pattern=" + x) as an escaping mechanism
to ensure that x is not interpreted as a query operator if it contains '='.
A third query "name=identifier" will be added soon.
It will match packages whose package declaration contains the specified identifier.
For example, "name=rand" would match the packages "math/rand" and "crypto/rand",
and "name=main" would match all executables.
All other query operators are reserved for future use and currently
cause Load to report an error.
The Package struct provides basic information about the package, including
- ID, a unique identifier for the package in the returned set;
- GoFiles, the names of the package's Go source files;
- Imports, a map from source import strings to the Packages they name;
- Types, the type information for the package's exported symbols;
- Syntax, the parsed syntax trees for the package's source code; and
- TypeInfo, the result of a complete type-check of the package syntax trees.
(See the documentation for type Package for the complete list of fields
and more detailed descriptions.)
For example,
Load(nil, "bytes", "unicode...")
returns four Package structs describing the standard library packages
bytes, unicode, unicode/utf16, and unicode/utf8. Note that one pattern
can match multiple packages and that a package might be matched by
multiple patterns: in general it is not possible to determine which
packages correspond to which patterns.
Note that the list returned by Load contains only the packages matched
by the patterns. Their dependencies can be found by walking the import
graph using the Imports fields.
The Load function can be configured by passing a pointer to a Config as
the first argument. A nil Config is equivalent to the zero Config, which
causes Load to run in LoadFiles mode, collecting minimal information.
See the documentation for type Config for details.
As noted earlier, the Config.Mode controls the amount of detail
reported about the loaded packages, with each mode returning all the data of the
previous mode with some extra added. See the documentation for type LoadMode
for details.
Most tools should pass their command-line arguments (after any flags)
uninterpreted to the loader, so that the loader can interpret them
according to the conventions of the underlying build system.
See the Example function for typical usage.
*/
package packages // import "golang.org/x/tools/go/packages"
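// Hedged usage sketch (not part of the original file), exercising the "file="
// query operator described in the package comment above; the file path is
// illustrative only.
func exampleFileQuery() ([]*Package, error) {
	cfg := &Config{Mode: LoadFiles}
	return Load(cfg, "file=/home/user/go/src/fmt/print.go")
}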
/*
Motivation and design considerations
The new package's design solves problems addressed by two existing
packages: go/build, which locates and describes packages, and
golang.org/x/tools/go/loader, which loads, parses and type-checks them.
The go/build.Package structure encodes too much of the 'go build' way
of organizing projects, leaving us in need of a data type that describes a
package of Go source code independent of the underlying build system.
We wanted something that works equally well with go build and vgo, and
also other build systems such as Bazel and Blaze, making it possible to
construct analysis tools that work in all these environments.
Tools such as errcheck and staticcheck were essentially unavailable to
the Go community at Google, and some of Google's internal tools for Go
are unavailable externally.
This new package provides a uniform way to obtain package metadata by
querying each of these build systems, optionally supporting their
preferred command-line notations for packages, so that tools integrate
neatly with users' build environments. The Metadata query function
executes an external query tool appropriate to the current workspace.
Loading packages always returns the complete import graph "all the way down",
even if all you want is information about a single package, because the query
mechanisms of all the build systems we currently support ({go,vgo} list, and
blaze/bazel aspect-based query) cannot provide detailed information
about one package without visiting all its dependencies too, so there is
no additional asymptotic cost to providing transitive information.
(This property might not be true of a hypothetical 5th build system.)
In calls to TypeCheck, all initial packages, and any package that
transitively depends on one of them, must be loaded from source.
Consider A->B->C->D->E: if A,C are initial, A,B,C must be loaded from
source; D may be loaded from export data, and E may not be loaded at all
(though it's possible that D's export data mentions it, so a
types.Package may be created for it and exposed.)
The old loader had a feature to suppress type-checking of function
bodies on a per-package basis, primarily intended to reduce the work of
obtaining type information for imported packages. Now that imports are
satisfied by export data, the optimization no longer seems necessary.
Despite some early attempts, the old loader did not exploit export data,
instead always using the equivalent of WholeProgram mode. This was due
to the complexity of mixing source and export data packages (now
resolved by the upward traversal mentioned above), and because export data
files were nearly always missing or stale. Now that 'go build' supports
caching, all the underlying build systems can guarantee to produce
export data in a reasonable (amortized) time.
Test "main" packages synthesized by the build system are now reported as
first-class packages, avoiding the need for clients (such as go/ssa) to
reinvent this generation logic.
One way in which go/packages is simpler than the old loader is in its
treatment of in-package tests. In-package tests are packages that
consist of all the files of the library under test, plus the test files.
The old loader constructed in-package tests by a two-phase process of
mutation called "augmentation": first it would construct and type check
all the ordinary library packages and type-check the packages that
depend on them; then it would add more (test) files to the package and
type-check again. This two-phase approach had four major problems:
1) in processing the tests, the loader modified the library package,
leaving no way for a client application to see both the test
package and the library package; one would mutate into the other.
2) because test files can declare additional methods on types defined in
the library portion of the package, the dispatch of method calls in
the library portion was affected by the presence of the test files.
This should have been a clue that the packages were logically
different.
3) this model of "augmentation" assumed at most one in-package test
per library package, which is true of projects using 'go build',
but not other build systems.
4) because of the two-phase nature of test processing, all packages that
import the library package had to be processed before augmentation,
forcing a "one-shot" API and preventing the client from calling Load
several times in sequence, as is now possible in WholeProgram mode.
(TypeCheck mode has a similar one-shot restriction for a different reason.)
Early drafts of this package supported "multi-shot" operation.
Although it allowed clients to make a sequence of calls (or concurrent
calls) to Load, building up the graph of Packages incrementally,
it was of marginal value: it complicated the API
(since it allowed some options to vary across calls but not others),
it complicated the implementation,
it cannot be made to work in Types mode, as explained above,
and it was less efficient than making one combined call (when this is possible).
Among the clients we have inspected, none made multiple calls to Load
but could not be easily and satisfactorily modified to make only a single call.
However, application changes may be required.
For example, the ssadump command loads the user-specified packages
and in addition the runtime package. It is tempting to simply append
"runtime" to the user-provided list, but that does not work if the user
specified an ad-hoc package such as [a.go b.go].
Instead, ssadump no longer requests the runtime package,
but seeks it among the dependencies of the user-specified packages,
and emits an error if it is not found.
Overlays: the ParseFile hook in the API permits clients to vary the way
in which ASTs are obtained from filenames; the default implementation is
based on parser.ParseFile. This feature enables editor-integrated tools
that analyze the contents of modified but unsaved buffers: rather than
read from the file system, a tool can read from an archive of modified
buffers provided by the editor.
This approach has its limits. Because package metadata is obtained by
fork/execing an external query command for each build system, we can
fake only the file contents seen by the parser, type-checker, and
application, but not by the metadata query, so, for example:
- additional imports in the fake file will not be described by the
metadata, so the type checker will fail to load imports that create
new dependencies.
- in TypeCheck mode, because export data is produced by the query
command, it will not reflect the fake file contents.
- this mechanism cannot add files to a package without first saving them.
Questions & Tasks
- Add GOARCH/GOOS?
They are not portable concepts, but could be made portable.
Our goal has been to allow users to express themselves using the conventions
of the underlying build system: if the build system honors GOARCH
during a build and during a metadata query, then so should
applications built atop that query mechanism.
Conversely, if the target architecture of the build is determined by
command-line flags, the application can pass the relevant
flags through to the build system using a command such as:
myapp -query_flag="--cpu=amd64" -query_flag="--os=darwin"
However, this approach is low-level, unwieldy, and non-portable.
GOOS and GOARCH seem important enough to warrant a dedicated option.
- How should we handle partial failures such as a mixture of good and
malformed patterns, existing and non-existent packages, successful and
failed builds, import failures, import cycles, and so on, in a call to
Load?
- Support bazel, blaze, and go1.10 list, not just go1.11 list.
- Handle (and test) various partial success cases, e.g.
a mixture of good packages and:
invalid patterns
nonexistent packages
empty packages
packages with malformed package or import declarations
unreadable files
import cycles
other parse errors
type errors
Make sure we record errors at the correct place in the graph.
- Missing packages among initial arguments are not reported.
Return bogus packages for them, like golist does.
- "undeclared name" errors (for example) are reported out of source file
order. I suspect this is due to the breadth-first resolution now used
by go/types. Is that a bug? Discuss with gri.
*/

View file

@ -1,68 +0,0 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This file enables an external tool to intercept package requests.
// If the tool is present then its results are used in preference to
// the go list command.
package packages
import (
"bytes"
"encoding/json"
"fmt"
"os/exec"
"strings"
)
// findExternalDriver returns a driver function that invokes an external tool
// supplying the build system package structure, or nil if no such tool is found.
// If GOPACKAGESDRIVER is set in the environment, findExternalDriver uses its
// value; otherwise it searches for a binary named gopackagesdriver on the PATH.
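// As a usage note: setting GOPACKAGESDRIVER=off in cfg.Env forces the
// built-in go list driver even if a gopackagesdriver binary is on the PATH.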
func findExternalDriver(cfg *Config) driver {
const toolPrefix = "GOPACKAGESDRIVER="
tool := ""
for _, env := range cfg.Env {
if val := strings.TrimPrefix(env, toolPrefix); val != env {
tool = val
}
}
if tool != "" && tool == "off" {
return nil
}
if tool == "" {
var err error
tool, err = exec.LookPath("gopackagesdriver")
if err != nil {
return nil
}
}
return func(cfg *Config, words ...string) (*driverResponse, error) {
buf := new(bytes.Buffer)
fullargs := []string{
"list",
fmt.Sprintf("-test=%t", cfg.Tests),
fmt.Sprintf("-export=%t", usesExportData(cfg)),
fmt.Sprintf("-deps=%t", cfg.Mode >= LoadImports),
}
for _, f := range cfg.BuildFlags {
fullargs = append(fullargs, fmt.Sprintf("-buildflag=%v", f))
}
fullargs = append(fullargs, "--")
fullargs = append(fullargs, words...)
cmd := exec.CommandContext(cfg.Context, tool, fullargs...)
cmd.Env = cfg.Env
cmd.Dir = cfg.Dir
cmd.Stdout = buf
cmd.Stderr = new(bytes.Buffer)
if err := cmd.Run(); err != nil {
return nil, fmt.Errorf("%v: %v: %s", tool, err, cmd.Stderr)
}
var response driverResponse
if err := json.Unmarshal(buf.Bytes(), &response); err != nil {
return nil, err
}
return &response, nil
}
}

View file

@ -1,625 +0,0 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package packages
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"log"
"os"
"os/exec"
"path/filepath"
"regexp"
"strings"
"sync"
"golang.org/x/tools/internal/gopathwalk"
"golang.org/x/tools/internal/semver"
)
// A goTooOldError reports that the go command
// found by exec.LookPath is too old to use the new go list behavior.
type goTooOldError struct {
error
}
// goListDriver uses the go list command to interpret the patterns and produce
// the build system package structure.
// See driver for more details.
func goListDriver(cfg *Config, patterns ...string) (*driverResponse, error) {
// Determine files requested in contains patterns
var containFiles []string
var packagesNamed []string
restPatterns := make([]string, 0, len(patterns))
// Extract file= and other [querytype]= patterns. Report an error if querytype
// doesn't exist.
extractQueries:
for _, pattern := range patterns {
eqidx := strings.Index(pattern, "=")
if eqidx < 0 {
restPatterns = append(restPatterns, pattern)
} else {
query, value := pattern[:eqidx], pattern[eqidx+len("="):]
switch query {
case "file":
containFiles = append(containFiles, value)
case "pattern":
restPatterns = append(restPatterns, value)
case "name":
packagesNamed = append(packagesNamed, value)
case "": // not a reserved query
restPatterns = append(restPatterns, pattern)
default:
for _, rune := range query {
if rune < 'a' || rune > 'z' { // not a reserved query
restPatterns = append(restPatterns, pattern)
continue extractQueries
}
}
// Reject all other patterns containing "="
return nil, fmt.Errorf("invalid query type %q in query pattern %q", query, pattern)
}
}
}
patterns = restPatterns
// Look for the deprecated contains: syntax.
// TODO(matloob): delete this around mid-October 2018.
restPatterns = restPatterns[:0]
for _, pattern := range patterns {
if strings.HasPrefix(pattern, "contains:") {
containFile := strings.TrimPrefix(pattern, "contains:")
containFiles = append(containFiles, containFile)
} else {
restPatterns = append(restPatterns, pattern)
}
}
containFiles = absJoin(cfg.Dir, containFiles)
// TODO(matloob): Remove the definition of listfunc and just use golistPackages once go1.12 is released.
var listfunc driver
listfunc = func(cfg *Config, words ...string) (*driverResponse, error) {
response, err := golistDriverCurrent(cfg, words...)
if _, ok := err.(goTooOldError); ok {
listfunc = golistDriverFallback
return listfunc(cfg, words...)
}
listfunc = golistDriverCurrent
return response, err
}
var response *driverResponse
var err error
// see if we have any patterns to pass through to go list.
if len(restPatterns) > 0 {
response, err = listfunc(cfg, restPatterns...)
if err != nil {
return nil, err
}
} else {
response = &driverResponse{}
}
if len(containFiles) == 0 && len(packagesNamed) == 0 {
return response, nil
}
seenPkgs := make(map[string]*Package) // For deduplication: different containing queries could produce the same packages.
for _, pkg := range response.Packages {
seenPkgs[pkg.ID] = pkg
}
addPkg := func(p *Package) {
if _, ok := seenPkgs[p.ID]; ok {
return
}
seenPkgs[p.ID] = p
response.Packages = append(response.Packages, p)
}
containsResults, err := runContainsQueries(cfg, listfunc, addPkg, containFiles)
if err != nil {
return nil, err
}
response.Roots = append(response.Roots, containsResults...)
namedResults, err := runNamedQueries(cfg, listfunc, addPkg, packagesNamed)
if err != nil {
return nil, err
}
response.Roots = append(response.Roots, namedResults...)
return response, nil
}
func runContainsQueries(cfg *Config, driver driver, addPkg func(*Package), queries []string) ([]string, error) {
var results []string
for _, query := range queries {
// TODO(matloob): Do only one query per directory.
fdir := filepath.Dir(query)
cfg.Dir = fdir
dirResponse, err := driver(cfg, ".")
if err != nil {
return nil, err
}
isRoot := make(map[string]bool, len(dirResponse.Roots))
for _, root := range dirResponse.Roots {
isRoot[root] = true
}
for _, pkg := range dirResponse.Packages {
// Add any new packages to the main set
// We don't bother to filter packages that will be dropped by the changes of roots,
// that will happen anyway during graph construction outside this function.
// Over-reporting packages is not a problem.
addPkg(pkg)
// if the package was not a root one, it cannot have the file
if !isRoot[pkg.ID] {
continue
}
for _, pkgFile := range pkg.GoFiles {
if filepath.Base(query) == filepath.Base(pkgFile) {
results = append(results, pkg.ID)
break
}
}
}
}
return results, nil
}
// modCacheRegexp splits a path in a module cache into module, module version, and package.
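// For example, the module cache path "github.com/pkg/errors@v0.8.0/stack"
// splits into "github.com/pkg/errors", "v0.8.0", and "/stack".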
var modCacheRegexp = regexp.MustCompile(`(.*)@([^/\\]*)(.*)`)
func runNamedQueries(cfg *Config, driver driver, addPkg func(*Package), queries []string) ([]string, error) {
// Determine which directories are relevant to scan.
roots, modulesEnabled, err := roots(cfg)
if err != nil {
return nil, err
}
// Scan the selected directories. Simple matches, from GOPATH/GOROOT
// or the local module, can simply be "go list"ed. Matches from the
// module cache need special treatment.
var matchesMu sync.Mutex
var simpleMatches, modCacheMatches []string
add := func(root gopathwalk.Root, dir string) {
// Walk calls this concurrently; protect the result slices.
matchesMu.Lock()
defer matchesMu.Unlock()
path := dir[len(root.Path)+1:]
if pathMatchesQueries(path, queries) {
switch root.Type {
case gopathwalk.RootModuleCache:
modCacheMatches = append(modCacheMatches, path)
case gopathwalk.RootCurrentModule:
// We'd need to read go.mod to find the full
// import path. Relative's easier.
rel, err := filepath.Rel(cfg.Dir, dir)
if err != nil {
// This ought to be impossible, since
// we found dir in the current module.
panic(err)
}
simpleMatches = append(simpleMatches, "./"+rel)
case gopathwalk.RootGOPATH, gopathwalk.RootGOROOT:
simpleMatches = append(simpleMatches, path)
}
}
}
gopathwalk.Walk(roots, add, gopathwalk.Options{ModulesEnabled: modulesEnabled})
var results []string
addResponse := func(r *driverResponse) {
for _, pkg := range r.Packages {
addPkg(pkg)
for _, name := range queries {
if pkg.Name == name {
results = append(results, pkg.ID)
break
}
}
}
}
if len(simpleMatches) != 0 {
resp, err := driver(cfg, simpleMatches...)
if err != nil {
return nil, err
}
addResponse(resp)
}
// Module cache matches are tricky. We want to avoid downloading new
// versions of things, so we need to use the ones present in the cache.
// go list doesn't accept version specifiers, so we have to write out a
// temporary module, and do the list in that module.
if len(modCacheMatches) != 0 {
// Collect all the matches, deduplicating by major version
// and preferring the newest.
type modInfo struct {
mod string
major string
}
mods := make(map[modInfo]string)
var imports []string
for _, modPath := range modCacheMatches {
matches := modCacheRegexp.FindStringSubmatch(modPath)
mod, ver := filepath.ToSlash(matches[1]), matches[2]
importPath := filepath.ToSlash(filepath.Join(matches[1], matches[3]))
major := semver.Major(ver)
if prevVer, ok := mods[modInfo{mod, major}]; !ok || semver.Compare(ver, prevVer) > 0 {
mods[modInfo{mod, major}] = ver
}
imports = append(imports, importPath)
}
// Build the temporary module.
var gomod bytes.Buffer
gomod.WriteString("module modquery\nrequire (\n")
for mod, version := range mods {
gomod.WriteString("\t" + mod.mod + " " + version + "\n")
}
gomod.WriteString(")\n")
tmpCfg := *cfg
var err error
tmpCfg.Dir, err = ioutil.TempDir("", "gopackages-modquery")
if err != nil {
return nil, err
}
defer os.RemoveAll(tmpCfg.Dir)
if err := ioutil.WriteFile(filepath.Join(tmpCfg.Dir, "go.mod"), gomod.Bytes(), 0777); err != nil {
return nil, fmt.Errorf("writing go.mod for module cache query: %v", err)
}
// Run the query, using the import paths calculated from the matches above.
resp, err := driver(&tmpCfg, imports...)
if err != nil {
return nil, fmt.Errorf("querying module cache matches: %v", err)
}
addResponse(resp)
}
return results, nil
}
// roots selects the appropriate paths to walk based on the passed-in configuration,
// particularly the environment and the presence of a go.mod in cfg.Dir's parents.
func roots(cfg *Config) ([]gopathwalk.Root, bool, error) {
stdout, err := invokeGo(cfg, "env", "GOROOT", "GOPATH", "GOMOD")
if err != nil {
return nil, false, err
}
fields := strings.Split(stdout.String(), "\n")
if len(fields) != 4 || len(fields[3]) != 0 {
return nil, false, fmt.Errorf("go env returned unexpected output: %q", stdout.String())
}
goroot, gopath, gomod := fields[0], filepath.SplitList(fields[1]), fields[2]
modsEnabled := gomod != ""
var roots []gopathwalk.Root
// Always add GOROOT.
roots = append(roots, gopathwalk.Root{filepath.Join(goroot, "/src"), gopathwalk.RootGOROOT})
// If modules are enabled, scan the module dir.
if modsEnabled {
roots = append(roots, gopathwalk.Root{filepath.Dir(gomod), gopathwalk.RootCurrentModule})
}
// Add either GOPATH/src or GOPATH/pkg/mod, depending on module mode.
for _, p := range gopath {
if modsEnabled {
roots = append(roots, gopathwalk.Root{filepath.Join(p, "/pkg/mod"), gopathwalk.RootModuleCache})
} else {
roots = append(roots, gopathwalk.Root{filepath.Join(p, "/src"), gopathwalk.RootGOPATH})
}
}
return roots, modsEnabled, nil
}
// These functions were copied from goimports. See further documentation there.
// pathMatchesQueries is adapted from pkgIsCandidate.
// TODO: is it reasonable to do Contains here, rather than an exact match on a path component?
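// For example, the path "github.com/go-kit/kit" matches the query "kit":
// its last two components are "/go-kit/kit", which contains "kit".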
func pathMatchesQueries(path string, queries []string) bool {
lastTwo := lastTwoComponents(path)
for _, query := range queries {
if strings.Contains(lastTwo, query) {
return true
}
if hasHyphenOrUpperASCII(lastTwo) && !hasHyphenOrUpperASCII(query) {
lastTwo = lowerASCIIAndRemoveHyphen(lastTwo)
if strings.Contains(lastTwo, query) {
return true
}
}
}
return false
}
// lastTwoComponents returns at most the last two path components
// of v, using either / or \ as the path separator.
func lastTwoComponents(v string) string {
nslash := 0
for i := len(v) - 1; i >= 0; i-- {
if v[i] == '/' || v[i] == '\\' {
nslash++
if nslash == 2 {
return v[i:]
}
}
}
return v
}
func hasHyphenOrUpperASCII(s string) bool {
for i := 0; i < len(s); i++ {
b := s[i]
if b == '-' || ('A' <= b && b <= 'Z') {
return true
}
}
return false
}
func lowerASCIIAndRemoveHyphen(s string) (ret string) {
buf := make([]byte, 0, len(s))
for i := 0; i < len(s); i++ {
b := s[i]
switch {
case b == '-':
continue
case 'A' <= b && b <= 'Z':
buf = append(buf, b+('a'-'A'))
default:
buf = append(buf, b)
}
}
return string(buf)
}
// Fields must match go list;
// see $GOROOT/src/cmd/go/internal/load/pkg.go.
type jsonPackage struct {
ImportPath string
Dir string
Name string
Export string
GoFiles []string
CompiledGoFiles []string
CFiles []string
CgoFiles []string
CXXFiles []string
MFiles []string
HFiles []string
FFiles []string
SFiles []string
SwigFiles []string
SwigCXXFiles []string
SysoFiles []string
Imports []string
ImportMap map[string]string
Deps []string
TestGoFiles []string
TestImports []string
XTestGoFiles []string
XTestImports []string
ForTest string // q in a "p [q.test]" package, else ""
DepOnly bool
Error *jsonPackageError
}
type jsonPackageError struct {
ImportStack []string
Pos string
Err string
}
func otherFiles(p *jsonPackage) [][]string {
return [][]string{p.CFiles, p.CXXFiles, p.MFiles, p.HFiles, p.FFiles, p.SFiles, p.SwigFiles, p.SwigCXXFiles, p.SysoFiles}
}
// golistDriverCurrent uses the "go list" command to expand the
// pattern words and return metadata for the specified packages.
// dir may be "" and env may be nil, as per os/exec.Command.
func golistDriverCurrent(cfg *Config, words ...string) (*driverResponse, error) {
// go list uses the following identifiers in ImportPath and Imports:
//
// "p" -- importable package or main (command)
// "q.test" -- q's test executable
// "p [q.test]" -- variant of p as built for q's test executable
// "q_test [q.test]" -- q's external test package
//
// The packages p that are built differently for a test q.test
// are q itself, plus any helpers used by the external test q_test,
// typically including "testing" and all its dependencies.
// Run "go list" for complete
// information on the specified packages.
buf, err := invokeGo(cfg, golistargs(cfg, words)...)
if err != nil {
return nil, err
}
// Decode the JSON and convert it to Package form.
var response driverResponse
for dec := json.NewDecoder(buf); dec.More(); {
p := new(jsonPackage)
if err := dec.Decode(p); err != nil {
return nil, fmt.Errorf("JSON decoding failed: %v", err)
}
if p.ImportPath == "" {
// The documentation for go list says that “[e]rroneous packages will have
// a non-empty ImportPath”. If for some reason it comes back empty, we
// prefer to error out rather than silently discarding data or handing
// back a package without any way to refer to it.
if p.Error != nil {
return nil, Error{
Pos: p.Error.Pos,
Msg: p.Error.Err,
}
}
return nil, fmt.Errorf("package missing import path: %+v", p)
}
pkg := &Package{
Name: p.Name,
ID: p.ImportPath,
GoFiles: absJoin(p.Dir, p.GoFiles, p.CgoFiles),
CompiledGoFiles: absJoin(p.Dir, p.CompiledGoFiles),
OtherFiles: absJoin(p.Dir, otherFiles(p)...),
}
// Extract the PkgPath from the package's ID.
if i := strings.IndexByte(pkg.ID, ' '); i >= 0 {
pkg.PkgPath = pkg.ID[:i]
} else {
pkg.PkgPath = pkg.ID
}
if pkg.PkgPath == "unsafe" {
pkg.GoFiles = nil // ignore fake unsafe.go file
}
// Assume go list emits only absolute paths for Dir.
if p.Dir != "" && !filepath.IsAbs(p.Dir) {
log.Fatalf("internal error: go list returned non-absolute Package.Dir: %s", p.Dir)
}
if p.Export != "" && !filepath.IsAbs(p.Export) {
pkg.ExportFile = filepath.Join(p.Dir, p.Export)
} else {
pkg.ExportFile = p.Export
}
// imports
//
// Imports contains the IDs of all imported packages.
// ImportsMap records (path, ID) only where they differ.
ids := make(map[string]bool)
for _, id := range p.Imports {
ids[id] = true
}
pkg.Imports = make(map[string]*Package)
for path, id := range p.ImportMap {
pkg.Imports[path] = &Package{ID: id} // non-identity import
delete(ids, id)
}
for id := range ids {
if id == "C" {
continue
}
pkg.Imports[id] = &Package{ID: id} // identity import
}
if !p.DepOnly {
response.Roots = append(response.Roots, pkg.ID)
}
// TODO(matloob): Temporary hack since CompiledGoFiles isn't always set.
if len(pkg.CompiledGoFiles) == 0 {
pkg.CompiledGoFiles = pkg.GoFiles
}
if p.Error != nil {
pkg.Errors = append(pkg.Errors, Error{
Pos: p.Error.Pos,
Msg: p.Error.Err,
})
}
response.Packages = append(response.Packages, pkg)
}
return &response, nil
}
// absJoin absolutizes and flattens the lists of files.
func absJoin(dir string, fileses ...[]string) (res []string) {
for _, files := range fileses {
for _, file := range files {
if !filepath.IsAbs(file) {
file = filepath.Join(dir, file)
}
res = append(res, file)
}
}
return res
}
func golistargs(cfg *Config, words []string) []string {
fullargs := []string{
"list", "-e", "-json", "-compiled",
fmt.Sprintf("-test=%t", cfg.Tests),
fmt.Sprintf("-export=%t", usesExportData(cfg)),
fmt.Sprintf("-deps=%t", cfg.Mode >= LoadImports),
}
fullargs = append(fullargs, cfg.BuildFlags...)
fullargs = append(fullargs, "--")
fullargs = append(fullargs, words...)
return fullargs
}
// invokeGo returns the stdout of a go command invocation.
func invokeGo(cfg *Config, args ...string) (*bytes.Buffer, error) {
stdout := new(bytes.Buffer)
stderr := new(bytes.Buffer)
cmd := exec.CommandContext(cfg.Context, "go", args...)
// On darwin the cwd gets resolved to the real path, which breaks anything that
// expects the working directory to keep the original path, including the
// go command when dealing with modules.
// The Go stdlib has a special feature where if the cwd and the PWD are the
// same node then it trusts the PWD, so by setting it in the env for the child
// process we fix up all the paths returned by the go command.
cmd.Env = append(append([]string{}, cfg.Env...), "PWD="+cfg.Dir)
cmd.Dir = cfg.Dir
cmd.Stdout = stdout
cmd.Stderr = stderr
if err := cmd.Run(); err != nil {
exitErr, ok := err.(*exec.ExitError)
if !ok {
// Catastrophic error:
// - executable not found
// - context cancellation
return nil, fmt.Errorf("couldn't exec 'go %v': %s %T", args, err, err)
}
// Old go version?
if strings.Contains(stderr.String(), "flag provided but not defined") {
return nil, goTooOldError{fmt.Errorf("unsupported version of go: %s: %s", exitErr, stderr)}
}
// Export mode entails a build.
// If that build fails, errors appear on stderr
// (despite the -e flag) and the Export field is blank.
// Do not fail in that case.
if !usesExportData(cfg) {
return nil, fmt.Errorf("go %v: %s: %s", args, exitErr, stderr)
}
}
// As of writing, go list -export prints some non-fatal compilation
// errors to stderr, even with -e set. We would prefer that it put
// them in the Package.Error JSON (see http://golang.org/issue/26319).
// In the meantime, there's nowhere good to put them, but they can
// be useful for debugging. Print them if $GOPACKAGESPRINTGOLISTERRORS
// is set.
if len(stderr.Bytes()) != 0 && os.Getenv("GOPACKAGESPRINTGOLISTERRORS") != "" {
fmt.Fprintf(os.Stderr, "go %v stderr: <<%s>>\n", args, stderr)
}
// debugging
if false {
fmt.Fprintf(os.Stderr, "go %v stdout: <<%s>>\n", args, stdout)
}
return stdout, nil
}

View file

@ -1,457 +0,0 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package packages
import (
"encoding/json"
"fmt"
"go/build"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"sort"
"strings"
"golang.org/x/tools/go/internal/cgo"
)
// TODO(matloob): Delete this file once Go 1.12 is released.
// This file provides backwards compatibility support for
// loading for versions of Go earlier than 1.10.4. This support is meant to
// assist with migration to the Package API until there's
// widespread adoption of these newer Go versions.
// This support will be removed once Go 1.12 is released
// in Q1 2019.
func golistDriverFallback(cfg *Config, words ...string) (*driverResponse, error) {
// Turn absolute paths into GOROOT and GOPATH-relative paths to provide to go list.
// This will have surprising behavior if GOROOT or GOPATH contain multiple packages with the same
// path and a user provides an absolute path to a directory that's shadowed by an earlier
// directory in GOROOT or GOPATH with the same package path.
words = cleanAbsPaths(cfg, words)
original, deps, err := getDeps(cfg, words...)
if err != nil {
return nil, err
}
var tmpdir string // used for generated cgo files
var needsTestVariant []struct {
pkg, xtestPkg *Package
}
var response driverResponse
allPkgs := make(map[string]bool)
addPackage := func(p *jsonPackage) {
id := p.ImportPath
if allPkgs[id] {
return
}
allPkgs[id] = true
isRoot := original[id] != nil
pkgpath := id
if pkgpath == "unsafe" {
p.GoFiles = nil // ignore fake unsafe.go file
}
importMap := func(importlist []string) map[string]*Package {
importMap := make(map[string]*Package)
for _, id := range importlist {
if id == "C" {
for _, path := range []string{"unsafe", "syscall", "runtime/cgo"} {
if pkgpath != path && importMap[path] == nil {
importMap[path] = &Package{ID: path}
}
}
continue
}
importMap[vendorlessPath(id)] = &Package{ID: id}
}
return importMap
}
compiledGoFiles := absJoin(p.Dir, p.GoFiles)
// Use a function to simplify control flow. It's just a bunch of gotos.
var cgoErrors []error
var outdir string
getOutdir := func() (string, error) {
if outdir != "" {
return outdir, nil
}
if tmpdir == "" {
if tmpdir, err = ioutil.TempDir("", "gopackages"); err != nil {
return "", err
}
}
// Add a "go-build" component to the path to make the tests think the files are in the cache.
// This allows the same test to test the pre- and post-Go 1.11 go list logic because the Go 1.11
// go list generates test mains in the cache, and the test code knows not to rely on paths in the
// cache to stay stable.
outdir = filepath.Join(tmpdir, "go-build", strings.Replace(p.ImportPath, "/", "_", -1))
if err := os.MkdirAll(outdir, 0755); err != nil {
outdir = ""
return "", err
}
return outdir, nil
}
processCgo := func() bool {
// Suppress any cgo errors. Any relevant errors will show up in typechecking.
// TODO(matloob): Skip running cgo if Mode < LoadTypes.
outdir, err := getOutdir()
if err != nil {
cgoErrors = append(cgoErrors, err)
return false
}
files, _, err := runCgo(p.Dir, outdir, cfg.Env)
if err != nil {
cgoErrors = append(cgoErrors, err)
return false
}
compiledGoFiles = append(compiledGoFiles, files...)
return true
}
if len(p.CgoFiles) == 0 || !processCgo() {
compiledGoFiles = append(compiledGoFiles, absJoin(p.Dir, p.CgoFiles)...) // Punt to typechecker.
}
if isRoot {
response.Roots = append(response.Roots, id)
}
pkg := &Package{
ID: id,
Name: p.Name,
GoFiles: absJoin(p.Dir, p.GoFiles, p.CgoFiles),
CompiledGoFiles: compiledGoFiles,
OtherFiles: absJoin(p.Dir, otherFiles(p)...),
PkgPath: pkgpath,
Imports: importMap(p.Imports),
// TODO(matloob): set errors on the Package to cgoErrors
}
if p.Error != nil {
pkg.Errors = append(pkg.Errors, Error{
Pos: p.Error.Pos,
Msg: p.Error.Err,
})
}
response.Packages = append(response.Packages, pkg)
if cfg.Tests && isRoot {
testID := fmt.Sprintf("%s [%s.test]", id, id)
if len(p.TestGoFiles) > 0 || len(p.XTestGoFiles) > 0 {
response.Roots = append(response.Roots, testID)
testPkg := &Package{
ID: testID,
Name: p.Name,
GoFiles: absJoin(p.Dir, p.GoFiles, p.CgoFiles, p.TestGoFiles),
CompiledGoFiles: append(compiledGoFiles, absJoin(p.Dir, p.TestGoFiles)...),
OtherFiles: absJoin(p.Dir, otherFiles(p)...),
PkgPath: pkgpath,
Imports: importMap(append(p.Imports, p.TestImports...)),
// TODO(matloob): set errors on the Package to cgoErrors
}
response.Packages = append(response.Packages, testPkg)
var xtestPkg *Package
if len(p.XTestGoFiles) > 0 {
xtestID := fmt.Sprintf("%s_test [%s.test]", id, id)
response.Roots = append(response.Roots, xtestID)
// Generate test variants for all packages q where a path exists
// such that xtestPkg -> ... -> q -> ... -> p (where p is the package under test)
// and rewrite all import map entries of p to point to testPkg (the test variant of
// p), and of each q to point to the test variant of that q.
xtestPkg = &Package{
ID: xtestID,
Name: p.Name + "_test",
GoFiles: absJoin(p.Dir, p.XTestGoFiles),
CompiledGoFiles: absJoin(p.Dir, p.XTestGoFiles),
PkgPath: pkgpath + "_test",
Imports: importMap(p.XTestImports),
}
// Add to list of packages we need to rewrite imports for to refer to test variants.
// We may need to create a test variant of a package that hasn't been loaded yet, so
// the test variants need to be created later.
needsTestVariant = append(needsTestVariant, struct{ pkg, xtestPkg *Package }{pkg, xtestPkg})
response.Packages = append(response.Packages, xtestPkg)
}
// testmain package
testmainID := id + ".test"
response.Roots = append(response.Roots, testmainID)
imports := map[string]*Package{}
imports[testPkg.PkgPath] = &Package{ID: testPkg.ID}
if xtestPkg != nil {
imports[xtestPkg.PkgPath] = &Package{ID: xtestPkg.ID}
}
testmainPkg := &Package{
ID: testmainID,
Name: "main",
PkgPath: testmainID,
Imports: imports,
}
response.Packages = append(response.Packages, testmainPkg)
outdir, err := getOutdir()
if err != nil {
testmainPkg.Errors = append(testmainPkg.Errors, Error{
Pos: "-",
Msg: fmt.Sprintf("failed to generate testmain: %v", err),
Kind: ListError,
})
return
}
testmain := filepath.Join(outdir, "testmain.go")
extraimports, extradeps, err := generateTestmain(testmain, testPkg, xtestPkg)
if err != nil {
testmainPkg.Errors = append(testmainPkg.Errors, Error{
Pos: "-",
Msg: fmt.Sprintf("failed to generate testmain: %v", err),
Kind: ListError,
})
}
deps = append(deps, extradeps...)
for _, imp := range extraimports { // testing, testing/internal/testdeps, and maybe os
imports[imp] = &Package{ID: imp}
}
testmainPkg.GoFiles = []string{testmain}
testmainPkg.CompiledGoFiles = []string{testmain}
}
}
}
for _, pkg := range original {
addPackage(pkg)
}
if cfg.Mode < LoadImports || len(deps) == 0 {
return &response, nil
}
buf, err := invokeGo(cfg, golistArgsFallback(cfg, deps)...)
if err != nil {
return nil, err
}
// Decode the JSON and convert it to Package form.
for dec := json.NewDecoder(buf); dec.More(); {
p := new(jsonPackage)
if err := dec.Decode(p); err != nil {
return nil, fmt.Errorf("JSON decoding failed: %v", err)
}
addPackage(p)
}
for _, v := range needsTestVariant {
createTestVariants(&response, v.pkg, v.xtestPkg)
}
// TODO(matloob): Is this the right ordering?
sort.SliceStable(response.Packages, func(i, j int) bool {
return response.Packages[i].PkgPath < response.Packages[j].PkgPath
})
return &response, nil
}
func createTestVariants(response *driverResponse, pkgUnderTest, xtestPkg *Package) {
allPkgs := make(map[string]*Package)
for _, pkg := range response.Packages {
allPkgs[pkg.ID] = pkg
}
needsTestVariant := make(map[string]bool)
needsTestVariant[pkgUnderTest.ID] = true
var needsVariantRec func(p *Package) bool
needsVariantRec = func(p *Package) bool {
if needsTestVariant[p.ID] {
return true
}
for _, imp := range p.Imports {
if needsVariantRec(allPkgs[imp.ID]) {
// Don't break because we want to make sure all dependencies
// have been processed, and all required test variants of our dependencies
// exist.
needsTestVariant[p.ID] = true
}
}
if !needsTestVariant[p.ID] {
return false
}
// Create a clone of the package. It will share the same strings and lists of source files,
// but that's okay. It's only necessary for the Imports map to have a separate identity.
testVariant := *p
testVariant.ID = fmt.Sprintf("%s [%s.test]", p.ID, pkgUnderTest.ID)
testVariant.Imports = make(map[string]*Package)
for imp, pkg := range p.Imports {
testVariant.Imports[imp] = pkg
if needsTestVariant[pkg.ID] {
testVariant.Imports[imp] = &Package{ID: fmt.Sprintf("%s [%s.test]", pkg.ID, pkgUnderTest.ID)}
}
}
response.Packages = append(response.Packages, &testVariant)
return needsTestVariant[p.ID]
}
// finally, update the xtest package's imports
for imp, pkg := range xtestPkg.Imports {
if allPkgs[pkg.ID] == nil {
fmt.Printf("for %s: package %s doesn't exist\n", xtestPkg.ID, pkg.ID)
}
if needsVariantRec(allPkgs[pkg.ID]) {
xtestPkg.Imports[imp] = &Package{ID: fmt.Sprintf("%s [%s.test]", pkg.ID, pkgUnderTest.ID)}
}
}
}
// cleanAbsPaths replaces all absolute paths with GOPATH- and GOROOT-relative
// paths. If an absolute path is not GOPATH- or GOROOT-relative, it is left as an
// absolute path so an error can be returned later.
func cleanAbsPaths(cfg *Config, words []string) []string {
var searchpaths []string
var cleaned = make([]string, len(words))
for i := range cleaned {
cleaned[i] = words[i]
// Ignore relative directory paths (they must already be goroot-relative) and Go source files
// (absolute source files are already allowed for ad-hoc packages).
// TODO(matloob): Can there be non-.go files in ad-hoc packages?
if !filepath.IsAbs(cleaned[i]) || strings.HasSuffix(cleaned[i], ".go") {
continue
}
// otherwise, it's an absolute path. Search GOPATH and GOROOT to find it.
if searchpaths == nil {
cmd := exec.Command("go", "env", "GOPATH", "GOROOT")
cmd.Env = cfg.Env
out, err := cmd.Output()
if err != nil {
searchpaths = []string{}
continue // suppress the error, it will show up again when running go list
}
lines := strings.Split(string(out), "\n")
if len(lines) != 3 || lines[0] == "" || lines[1] == "" || lines[2] != "" {
continue // suppress error
}
// first line is GOPATH
for _, path := range filepath.SplitList(lines[0]) {
searchpaths = append(searchpaths, filepath.Join(path, "src"))
}
// second line is GOROOT
searchpaths = append(searchpaths, filepath.Join(lines[1], "src"))
}
for _, sp := range searchpaths {
if strings.HasPrefix(cleaned[i], sp) {
cleaned[i] = strings.TrimPrefix(cleaned[i], sp)
cleaned[i] = strings.TrimLeft(cleaned[i], string(filepath.Separator))
}
}
}
return cleaned
}
// vendorlessPath returns the devendorized version of the import path ipath.
// For example, VendorlessPath("foo/bar/vendor/a/b") returns "a/b".
// Copied from golang.org/x/tools/imports/fix.go.
func vendorlessPath(ipath string) string {
// Devendorize for use in import statement.
if i := strings.LastIndex(ipath, "/vendor/"); i >= 0 {
return ipath[i+len("/vendor/"):]
}
if strings.HasPrefix(ipath, "vendor/") {
return ipath[len("vendor/"):]
}
return ipath
}
// getDeps runs an initial go list to determine all the dependency packages.
func getDeps(cfg *Config, words ...string) (originalSet map[string]*jsonPackage, deps []string, err error) {
buf, err := invokeGo(cfg, golistArgsFallback(cfg, words)...)
if err != nil {
return nil, nil, err
}
depsSet := make(map[string]bool)
originalSet = make(map[string]*jsonPackage)
var testImports []string
// Extract deps from the JSON.
for dec := json.NewDecoder(buf); dec.More(); {
p := new(jsonPackage)
if err := dec.Decode(p); err != nil {
return nil, nil, fmt.Errorf("JSON decoding failed: %v", err)
}
originalSet[p.ImportPath] = p
for _, dep := range p.Deps {
depsSet[dep] = true
}
if cfg.Tests {
// collect the additional imports of the test packages.
pkgTestImports := append(p.TestImports, p.XTestImports...)
for _, imp := range pkgTestImports {
if depsSet[imp] {
continue
}
depsSet[imp] = true
testImports = append(testImports, imp)
}
}
}
// Get the deps of the packages imported by tests.
if len(testImports) > 0 {
buf, err = invokeGo(cfg, golistArgsFallback(cfg, testImports)...)
if err != nil {
return nil, nil, err
}
// Extract deps from the JSON.
for dec := json.NewDecoder(buf); dec.More(); {
p := new(jsonPackage)
if err := dec.Decode(p); err != nil {
return nil, nil, fmt.Errorf("JSON decoding failed: %v", err)
}
for _, dep := range p.Deps {
depsSet[dep] = true
}
}
}
for orig := range originalSet {
delete(depsSet, orig)
}
deps = make([]string, 0, len(depsSet))
for dep := range depsSet {
deps = append(deps, dep)
}
sort.Strings(deps) // ensure output is deterministic
return originalSet, deps, nil
}
func golistArgsFallback(cfg *Config, words []string) []string {
fullargs := []string{"list", "-e", "-json"}
fullargs = append(fullargs, cfg.BuildFlags...)
fullargs = append(fullargs, "--")
fullargs = append(fullargs, words...)
return fullargs
}
func runCgo(pkgdir, tmpdir string, env []string) (files, displayfiles []string, err error) {
// Use go/build to open cgo files and determine the cgo flags, etc, from them.
// This is tricky so it's best to avoid reimplementing as much as we can, and
// we plan to delete this support once Go 1.12 is released anyways.
// TODO(matloob): This isn't completely correct because we're using the Default
// context. Perhaps we should more accurately fill in the context.
bp, err := build.ImportDir(pkgdir, build.ImportMode(0))
if err != nil {
return nil, nil, err
}
for _, ev := range env {
if v := strings.TrimPrefix(ev, "CGO_CPPFLAGS"); v != ev {
bp.CgoCPPFLAGS = append(bp.CgoCPPFLAGS, strings.Fields(v)...)
} else if v := strings.TrimPrefix(ev, "CGO_CFLAGS"); v != ev {
bp.CgoCFLAGS = append(bp.CgoCFLAGS, strings.Fields(v)...)
} else if v := strings.TrimPrefix(ev, "CGO_CXXFLAGS"); v != ev {
bp.CgoCXXFLAGS = append(bp.CgoCXXFLAGS, strings.Fields(v)...)
} else if v := strings.TrimPrefix(ev, "CGO_LDFLAGS"); v != ev {
bp.CgoLDFLAGS = append(bp.CgoLDFLAGS, strings.Fields(v)...)
}
}
return cgo.Run(bp, pkgdir, tmpdir, true)
}

View file

@ -1,318 +0,0 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This file is largely based on the Go 1.10-era cmd/go/internal/test/test.go
// testmain generation code.
package packages
import (
"errors"
"fmt"
"go/ast"
"go/doc"
"go/parser"
"go/token"
"os"
"sort"
"strings"
"text/template"
"unicode"
"unicode/utf8"
)
// TODO(matloob): Delete this file once Go 1.12 is released.
// This file complements golist_fallback.go by providing
// support for generating testmains.
func generateTestmain(out string, testPkg, xtestPkg *Package) (extraimports, extradeps []string, err error) {
testFuncs, err := loadTestFuncs(testPkg, xtestPkg)
if err != nil {
return nil, nil, err
}
extraimports = []string{"testing", "testing/internal/testdeps"}
if testFuncs.TestMain == nil {
extraimports = append(extraimports, "os")
}
// Transitive dependencies of ("testing", "testing/internal/testdeps").
// os is part of the transitive closure so it and its transitive dependencies are
// included regardless of whether it's imported in the template below.
extradeps = []string{
"errors",
"internal/cpu",
"unsafe",
"internal/bytealg",
"internal/race",
"runtime/internal/atomic",
"runtime/internal/sys",
"runtime",
"sync/atomic",
"sync",
"io",
"unicode",
"unicode/utf8",
"bytes",
"math",
"syscall",
"time",
"internal/poll",
"internal/syscall/unix",
"internal/testlog",
"os",
"math/bits",
"strconv",
"reflect",
"fmt",
"sort",
"strings",
"flag",
"runtime/debug",
"context",
"runtime/trace",
"testing",
"bufio",
"regexp/syntax",
"regexp",
"compress/flate",
"encoding/binary",
"hash",
"hash/crc32",
"compress/gzip",
"path/filepath",
"io/ioutil",
"text/tabwriter",
"runtime/pprof",
"testing/internal/testdeps",
}
return extraimports, extradeps, writeTestmain(out, testFuncs)
}
// The following is adapted from the cmd/go testmain generation code.
// isTestFunc tells whether fn has the type of a testing function. arg
// specifies the parameter type we look for: B, M or T.
func isTestFunc(fn *ast.FuncDecl, arg string) bool {
if fn.Type.Results != nil && len(fn.Type.Results.List) > 0 ||
fn.Type.Params.List == nil ||
len(fn.Type.Params.List) != 1 ||
len(fn.Type.Params.List[0].Names) > 1 {
return false
}
ptr, ok := fn.Type.Params.List[0].Type.(*ast.StarExpr)
if !ok {
return false
}
// We can't easily check that the type is *testing.M
// because we don't know how testing has been imported,
// but at least check that it's *M or *something.M.
// Same applies for B and T.
if name, ok := ptr.X.(*ast.Ident); ok && name.Name == arg {
return true
}
if sel, ok := ptr.X.(*ast.SelectorExpr); ok && sel.Sel.Name == arg {
return true
}
return false
}
// isTest tells whether name looks like a test (or benchmark, according to prefix).
// It is a Test (say) if there is a character after Test that is not a lower-case letter.
// We don't want TesticularCancer.
func isTest(name, prefix string) bool {
if !strings.HasPrefix(name, prefix) {
return false
}
if len(name) == len(prefix) { // "Test" is ok
return true
}
rune, _ := utf8.DecodeRuneInString(name[len(prefix):])
return !unicode.IsLower(rune)
}
// loadTestFuncs returns the testFuncs describing the tests that will be run.
func loadTestFuncs(ptest, pxtest *Package) (*testFuncs, error) {
t := &testFuncs{
TestPackage: ptest,
XTestPackage: pxtest,
}
for _, file := range ptest.GoFiles {
if !strings.HasSuffix(file, "_test.go") {
continue
}
if err := t.load(file, "_test", &t.ImportTest, &t.NeedTest); err != nil {
return nil, err
}
}
if pxtest != nil {
for _, file := range pxtest.GoFiles {
if err := t.load(file, "_xtest", &t.ImportXtest, &t.NeedXtest); err != nil {
return nil, err
}
}
}
return t, nil
}
// writeTestmain writes the _testmain.go file for t to the file named out.
func writeTestmain(out string, t *testFuncs) error {
f, err := os.Create(out)
if err != nil {
return err
}
defer f.Close()
if err := testmainTmpl.Execute(f, t); err != nil {
return err
}
return nil
}
type testFuncs struct {
Tests []testFunc
Benchmarks []testFunc
Examples []testFunc
TestMain *testFunc
TestPackage *Package
XTestPackage *Package
ImportTest bool
NeedTest bool
ImportXtest bool
NeedXtest bool
}
// Tested returns the name of the package being tested.
func (t *testFuncs) Tested() string {
return t.TestPackage.Name
}
type testFunc struct {
Package string // imported package name (_test or _xtest)
Name string // function name
Output string // output, for examples
Unordered bool // output is allowed to be unordered.
}
func (t *testFuncs) load(filename, pkg string, doImport, seen *bool) error {
var fset = token.NewFileSet()
f, err := parser.ParseFile(fset, filename, nil, parser.ParseComments)
if err != nil {
return errors.New("failed to parse test file " + filename)
}
for _, d := range f.Decls {
n, ok := d.(*ast.FuncDecl)
if !ok {
continue
}
if n.Recv != nil {
continue
}
name := n.Name.String()
switch {
case name == "TestMain":
if isTestFunc(n, "T") {
t.Tests = append(t.Tests, testFunc{pkg, name, "", false})
*doImport, *seen = true, true
continue
}
err := checkTestFunc(fset, n, "M")
if err != nil {
return err
}
if t.TestMain != nil {
return errors.New("multiple definitions of TestMain")
}
t.TestMain = &testFunc{pkg, name, "", false}
*doImport, *seen = true, true
case isTest(name, "Test"):
err := checkTestFunc(fset, n, "T")
if err != nil {
return err
}
t.Tests = append(t.Tests, testFunc{pkg, name, "", false})
*doImport, *seen = true, true
case isTest(name, "Benchmark"):
err := checkTestFunc(fset, n, "B")
if err != nil {
return err
}
t.Benchmarks = append(t.Benchmarks, testFunc{pkg, name, "", false})
*doImport, *seen = true, true
}
}
ex := doc.Examples(f)
sort.Slice(ex, func(i, j int) bool { return ex[i].Order < ex[j].Order })
for _, e := range ex {
*doImport = true // import test file whether executed or not
if e.Output == "" && !e.EmptyOutput {
// Don't run examples with no output.
continue
}
t.Examples = append(t.Examples, testFunc{pkg, "Example" + e.Name, e.Output, e.Unordered})
*seen = true
}
return nil
}
func checkTestFunc(fset *token.FileSet, fn *ast.FuncDecl, arg string) error {
if !isTestFunc(fn, arg) {
name := fn.Name.String()
pos := fset.Position(fn.Pos())
return fmt.Errorf("%s: wrong signature for %s, must be: func %s(%s *testing.%s)", pos, name, name, strings.ToLower(arg), arg)
}
return nil
}
var testmainTmpl = template.Must(template.New("main").Parse(`
package main
import (
{{if not .TestMain}}
"os"
{{end}}
"testing"
"testing/internal/testdeps"
{{if .ImportTest}}
{{if .NeedTest}}_test{{else}}_{{end}} {{.TestPackage.PkgPath | printf "%q"}}
{{end}}
{{if .ImportXtest}}
{{if .NeedXtest}}_xtest{{else}}_{{end}} {{.XTestPackage.PkgPath | printf "%q"}}
{{end}}
)
var tests = []testing.InternalTest{
{{range .Tests}}
{"{{.Name}}", {{.Package}}.{{.Name}}},
{{end}}
}
var benchmarks = []testing.InternalBenchmark{
{{range .Benchmarks}}
{"{{.Name}}", {{.Package}}.{{.Name}}},
{{end}}
}
var examples = []testing.InternalExample{
{{range .Examples}}
{"{{.Name}}", {{.Package}}.{{.Name}}, {{.Output | printf "%q"}}, {{.Unordered}}},
{{end}}
}
func init() {
testdeps.ImportPath = {{.TestPackage.PkgPath | printf "%q"}}
}
func main() {
m := testing.MainStart(testdeps.TestDeps{}, tests, benchmarks, examples)
{{with .TestMain}}
{{.Package}}.{{.Name}}(m)
{{else}}
os.Exit(m.Run())
{{end}}
}
`))

View file

@ -1,935 +0,0 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package packages
// See doc.go for package documentation and implementation notes.
import (
"context"
"encoding/json"
"fmt"
"go/ast"
"go/parser"
"go/scanner"
"go/token"
"go/types"
"io/ioutil"
"log"
"os"
"path/filepath"
"runtime"
"strings"
"sync"
"golang.org/x/tools/go/gcexportdata"
)
// A LoadMode specifies the amount of detail to return when loading.
// Higher-numbered modes cause Load to return more information,
// but may be slower. Load may return more information than requested.
type LoadMode int
const (
// LoadFiles finds the packages and computes their source file lists.
// Package fields: ID, Name, Errors, GoFiles, and OtherFiles.
LoadFiles LoadMode = iota
// LoadImports adds import information for each package
// and its dependencies.
// Package fields added: Imports.
LoadImports
// LoadTypes adds type information for package-level
// declarations in the packages matching the patterns.
// Package fields added: Types, Fset, and IllTyped.
// This mode uses type information provided by the build system when
// possible, and may fill in the ExportFile field.
LoadTypes
// LoadSyntax adds typed syntax trees for the packages matching the patterns.
// Package fields added: Syntax and TypesInfo, for direct pattern matches only.
LoadSyntax
// LoadAllSyntax adds typed syntax trees for the packages matching the patterns
// and all dependencies.
// Package fields added: Types, Fset, IllTyped, Syntax, and TypesInfo,
// for all packages in the import graph.
LoadAllSyntax
)
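// A minimal usage sketch (illustrative only; error handling elided):
//
//	cfg := &Config{Mode: LoadSyntax}
//	pkgs, err := Load(cfg, "fmt", "net/http")
//	for _, pkg := range pkgs {
//		fmt.Println(pkg.ID, len(pkg.Syntax))
//	}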
// A Config specifies details about how packages should be loaded.
// The zero value is a valid configuration.
// Calls to Load do not modify this struct.
type Config struct {
// Mode controls the level of information returned for each package.
Mode LoadMode
// Context specifies the context for the load operation.
// If the context is cancelled, the loader may stop early
// and return an ErrCancelled error.
// If Context is nil, the load cannot be cancelled.
Context context.Context
// Dir is the directory in which to run the build system's query tool
// that provides information about the packages.
// If Dir is empty, the tool is run in the current directory.
Dir string
// Env is the environment to use when invoking the build system's query tool.
// If Env is nil, the current environment is used.
// As in os/exec's Cmd, only the last value in the slice for
// each environment key is used. To specify the setting of only
// a few variables, append to the current environment, as in:
//
// opt.Env = append(os.Environ(), "GOOS=plan9", "GOARCH=386")
//
Env []string
// BuildFlags is a list of command-line flags to be passed through to
// the build system's query tool.
BuildFlags []string
// Fset provides source position information for syntax trees and types.
// If Fset is nil, the loader will create a new FileSet.
Fset *token.FileSet
// ParseFile is called to read and parse each file
// when preparing a package's type-checked syntax tree.
// It must be safe to call ParseFile simultaneously from multiple goroutines.
// If ParseFile is nil, the loader uses parser.ParseFile.
//
// ParseFile should parse the source from src and use filename only for
// recording position information.
//
// An application may supply a custom implementation of ParseFile
// to change the effective file contents or the behavior of the parser,
// or to modify the syntax tree. For example, selectively eliminating
// unwanted function bodies can significantly accelerate type checking.
ParseFile func(fset *token.FileSet, filename string, src []byte) (*ast.File, error)
// If Tests is set, the loader includes not just the packages
// matching a particular pattern but also any related test packages,
// including test-only variants of the package and the test executable.
//
// For example, when using the go command, loading "fmt" with Tests=true
// returns four packages, with IDs "fmt" (the standard package),
// "fmt [fmt.test]" (the package as compiled for the test),
// "fmt_test" (the test functions from source files in package fmt_test),
// and "fmt.test" (the test binary).
//
// In build systems with explicit names for tests,
// setting Tests may have no effect.
Tests bool
// Overlay provides a mapping of absolute file paths to file contents.
// If the file with the given path already exists, the parser will use the
// alternative file contents provided by the map.
//
// The Package.Imports map may not include packages that are imported only
// by the alternative file contents provided by Overlay. This may cause
// type-checking to fail.
Overlay map[string][]byte
}
// driver is the type for functions that query the build system for the
// packages named by the patterns.
type driver func(cfg *Config, patterns ...string) (*driverResponse, error)
// driverResponse contains the results for a driver query.
type driverResponse struct {
// Roots is the set of package IDs that make up the root packages.
// We have to encode this separately because when we encode a single package
// we cannot know if it is one of the roots as that requires knowledge of the
// graph it is part of.
Roots []string `json:",omitempty"`
// Packages is the full set of packages in the graph.
// The packages are not connected into a graph.
// The Imports map, if populated, will contain stubs that only have their ID set.
// Imports will be connected and then type and syntax information added in a
// later pass (see refine).
Packages []*Package
}
// Load loads and returns the Go packages named by the given patterns.
//
// Config specifies loading options;
// nil behaves the same as an empty Config.
//
// Load returns an error if any of the patterns was invalid
// as defined by the underlying build system.
// It may return an empty list of packages without an error,
// for instance for an empty expansion of a valid wildcard.
// Errors associated with a particular package are recorded in the
// corresponding Package's Errors list, and do not cause Load to
// return an error. Clients may need to handle such errors before
// proceeding with further analysis. The PrintErrors function is
// provided for convenient display of all errors.
func Load(cfg *Config, patterns ...string) ([]*Package, error) {
l := newLoader(cfg)
response, err := defaultDriver(&l.Config, patterns...)
if err != nil {
return nil, err
}
return l.refine(response.Roots, response.Packages...)
}
// defaultDriver is a driver that looks for an external driver binary, and if
// it does not find it falls back to the built in go list driver.
func defaultDriver(cfg *Config, patterns ...string) (*driverResponse, error) {
driver := findExternalDriver(cfg)
if driver == nil {
driver = goListDriver
}
return driver(cfg, patterns...)
}
// A Package describes a loaded Go package.
type Package struct {
// ID is a unique identifier for a package,
// in a syntax provided by the underlying build system.
//
// Because the syntax varies based on the build system,
// clients should treat IDs as opaque and not attempt to
// interpret them.
ID string
// Name is the package name as it appears in the package source code.
Name string
// PkgPath is the package path as used by the go/types package.
PkgPath string
// Errors contains any errors encountered querying the metadata
// of the package, or while parsing or type-checking its files.
Errors []Error
// GoFiles lists the absolute file paths of the package's Go source files.
GoFiles []string
// CompiledGoFiles lists the absolute file paths of the package's source
// files that were presented to the compiler.
// This may differ from GoFiles if files are processed before compilation.
CompiledGoFiles []string
// OtherFiles lists the absolute file paths of the package's non-Go source files,
// including assembly, C, C++, Fortran, Objective-C, SWIG, and so on.
OtherFiles []string
// ExportFile is the absolute path to a file containing type
// information for the package as provided by the build system.
ExportFile string
// Imports maps import paths appearing in the package's Go source files
// to corresponding loaded Packages.
Imports map[string]*Package
// Types provides type information for the package.
// Modes LoadTypes and above set this field for packages matching the
// patterns; type information for dependencies may be missing or incomplete.
// Mode LoadAllSyntax sets this field for all packages, including dependencies.
Types *types.Package
// Fset provides position information for Types, TypesInfo, and Syntax.
// It is set only when Types is set.
Fset *token.FileSet
// IllTyped indicates whether the package or any dependency contains errors.
// It is set only when Types is set.
IllTyped bool
// Syntax is the package's syntax trees, for the files listed in CompiledGoFiles.
//
// Mode LoadSyntax sets this field for packages matching the patterns.
// Mode LoadAllSyntax sets this field for all packages, including dependencies.
Syntax []*ast.File
// TypesInfo provides type information about the package's syntax trees.
// It is set only when Syntax is set.
TypesInfo *types.Info
}
// An Error describes a problem with a package's metadata, syntax, or types.
type Error struct {
Pos string // "file:line:col" or "file:line" or "" or "-"
Msg string
Kind ErrorKind
}
// ErrorKind describes the source of the error, allowing the user to
// differentiate between errors generated by the driver, the parser, or the
// type-checker.
type ErrorKind int
const (
UnknownError ErrorKind = iota
ListError
ParseError
TypeError
)
func (err Error) Error() string {
pos := err.Pos
if pos == "" {
pos = "-" // like token.Position{}.String()
}
return pos + ": " + err.Msg
}
// flatPackage is the JSON form of Package.
// It drops all the type and syntax fields, and transforms the Imports
// into a map from import path to package ID.
//
// TODO(adonovan): identify this struct with Package, effectively
// publishing the JSON protocol.
type flatPackage struct {
ID string
Name string `json:",omitempty"`
PkgPath string `json:",omitempty"`
Errors []Error `json:",omitempty"`
GoFiles []string `json:",omitempty"`
CompiledGoFiles []string `json:",omitempty"`
OtherFiles []string `json:",omitempty"`
ExportFile string `json:",omitempty"`
Imports map[string]string `json:",omitempty"`
}
// MarshalJSON returns the Package in its JSON form.
// For the most part, the structure fields are written out unmodified, and
// the type and syntax fields are skipped.
// The imports are written out as just a map of path to package id.
// The errors are written using a custom type that tries to preserve the
// structure of error types we know about.
//
// This method exists to enable support for additional build systems. It is
// not intended for use by clients of the API and we may change the format.
func (p *Package) MarshalJSON() ([]byte, error) {
flat := &flatPackage{
ID: p.ID,
Name: p.Name,
PkgPath: p.PkgPath,
Errors: p.Errors,
GoFiles: p.GoFiles,
CompiledGoFiles: p.CompiledGoFiles,
OtherFiles: p.OtherFiles,
ExportFile: p.ExportFile,
}
if len(p.Imports) > 0 {
flat.Imports = make(map[string]string, len(p.Imports))
for path, ipkg := range p.Imports {
flat.Imports[path] = ipkg.ID
}
}
return json.Marshal(flat)
}
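// For illustration, a small package might marshal to JSON roughly like this
// (a sketch with a hypothetical file path; fields abridged):
//
//	{"ID":"io","Name":"io","PkgPath":"io",
//	 "GoFiles":["/goroot/src/io/io.go"],
//	 "Imports":{"errors":"errors","sync":"sync"}}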
// UnmarshalJSON reads in a Package from its JSON format.
// See MarshalJSON for details about the format accepted.
func (p *Package) UnmarshalJSON(b []byte) error {
flat := &flatPackage{}
if err := json.Unmarshal(b, &flat); err != nil {
return err
}
*p = Package{
ID: flat.ID,
Name: flat.Name,
PkgPath: flat.PkgPath,
Errors: flat.Errors,
GoFiles: flat.GoFiles,
CompiledGoFiles: flat.CompiledGoFiles,
OtherFiles: flat.OtherFiles,
ExportFile: flat.ExportFile,
}
if len(flat.Imports) > 0 {
p.Imports = make(map[string]*Package, len(flat.Imports))
for path, id := range flat.Imports {
p.Imports[path] = &Package{ID: id}
}
}
return nil
}
func (p *Package) String() string { return p.ID }
// loaderPackage augments Package with state used during the loading phase
type loaderPackage struct {
*Package
importErrors map[string]error // maps each bad import to its error
loadOnce sync.Once
color uint8 // for cycle detection
needsrc bool // load from source (Mode >= LoadTypes)
needtypes bool // type information is either requested or depended on
initial bool // package was matched by a pattern
}
// loader holds the working state of a single call to load.
type loader struct {
pkgs map[string]*loaderPackage
Config
exportMu sync.Mutex // enforces mutual exclusion of exportdata operations
}
func newLoader(cfg *Config) *loader {
ld := &loader{}
if cfg != nil {
ld.Config = *cfg
}
if ld.Config.Env == nil {
ld.Config.Env = os.Environ()
}
if ld.Context == nil {
ld.Context = context.Background()
}
if ld.Dir == "" {
if dir, err := os.Getwd(); err == nil {
ld.Dir = dir
}
}
if ld.Mode >= LoadTypes {
if ld.Fset == nil {
ld.Fset = token.NewFileSet()
}
// ParseFile is required even in LoadTypes mode
// because we load source if export data is missing.
if ld.ParseFile == nil {
ld.ParseFile = func(fset *token.FileSet, filename string, src []byte) (*ast.File, error) {
var isrc interface{}
if src != nil {
isrc = src
}
const mode = parser.AllErrors | parser.ParseComments
return parser.ParseFile(fset, filename, isrc, mode)
}
}
}
return ld
}
// refine connects the supplied packages into a graph and then adds type
// and syntax information as requested by the LoadMode.
func (ld *loader) refine(roots []string, list ...*Package) ([]*Package, error) {
isRoot := make(map[string]bool, len(roots))
for _, root := range roots {
isRoot[root] = true
}
ld.pkgs = make(map[string]*loaderPackage)
// first pass, fixup and build the map and roots
var initial []*loaderPackage
for _, pkg := range list {
lpkg := &loaderPackage{
Package: pkg,
needtypes: ld.Mode >= LoadAllSyntax ||
ld.Mode >= LoadTypes && isRoot[pkg.ID],
needsrc: ld.Mode >= LoadAllSyntax ||
ld.Mode >= LoadSyntax && isRoot[pkg.ID] ||
pkg.ExportFile == "" && pkg.PkgPath != "unsafe",
}
ld.pkgs[lpkg.ID] = lpkg
if isRoot[lpkg.ID] {
initial = append(initial, lpkg)
lpkg.initial = true
}
}
// Materialize the import graph.
const (
white = 0 // new
grey = 1 // in progress
black = 2 // complete
)
// visit traverses the import graph, depth-first,
// and materializes the graph as Packages.Imports.
//
// Valid imports are saved in the Packages.Import map.
// Invalid imports (cycles and missing nodes) are saved in the importErrors map.
// Thus, even in the presence of both kinds of errors, the Import graph remains a DAG.
//
// visit returns whether the package needs src or has a transitive
// dependency on a package that does. These are the only packages
// for which we load source code.
var stack []*loaderPackage
var visit func(lpkg *loaderPackage) bool
var srcPkgs []*loaderPackage
visit = func(lpkg *loaderPackage) bool {
switch lpkg.color {
case black:
return lpkg.needsrc
case grey:
panic("internal error: grey node")
}
lpkg.color = grey
stack = append(stack, lpkg) // push
stubs := lpkg.Imports // the driver response provides only import stubs, each carrying just an ID
lpkg.Imports = make(map[string]*Package, len(stubs))
for importPath, ipkg := range stubs {
var importErr error
imp := ld.pkgs[ipkg.ID]
if imp == nil {
// (includes package "C" when DisableCgo)
importErr = fmt.Errorf("missing package: %q", ipkg.ID)
} else if imp.color == grey {
importErr = fmt.Errorf("import cycle: %s", stack)
}
if importErr != nil {
if lpkg.importErrors == nil {
lpkg.importErrors = make(map[string]error)
}
lpkg.importErrors[importPath] = importErr
continue
}
if visit(imp) {
lpkg.needsrc = true
}
lpkg.Imports[importPath] = imp.Package
}
if lpkg.needsrc {
srcPkgs = append(srcPkgs, lpkg)
}
stack = stack[:len(stack)-1] // pop
lpkg.color = black
return lpkg.needsrc
}
if ld.Mode < LoadImports {
// We do this to drop the stub import packages that we are not even going to try to resolve.
for _, lpkg := range initial {
lpkg.Imports = nil
}
} else {
// For each initial package, create its import DAG.
for _, lpkg := range initial {
visit(lpkg)
}
}
for _, lpkg := range srcPkgs {
// Complete type information is required for the
// immediate dependencies of each source package.
for _, ipkg := range lpkg.Imports {
imp := ld.pkgs[ipkg.ID]
imp.needtypes = true
}
}
// Load type data if needed, starting at
// the initial packages (roots of the import DAG).
if ld.Mode >= LoadTypes {
var wg sync.WaitGroup
for _, lpkg := range initial {
wg.Add(1)
go func(lpkg *loaderPackage) {
ld.loadRecursive(lpkg)
wg.Done()
}(lpkg)
}
wg.Wait()
}
result := make([]*Package, len(initial))
for i, lpkg := range initial {
result[i] = lpkg.Package
}
return result, nil
}
// loadRecursive loads the specified package and its dependencies,
// recursively, in parallel, in topological order.
// It is atomic and idempotent.
// Precondition: ld.Mode >= LoadTypes.
func (ld *loader) loadRecursive(lpkg *loaderPackage) {
lpkg.loadOnce.Do(func() {
// Load the direct dependencies, in parallel.
var wg sync.WaitGroup
for _, ipkg := range lpkg.Imports {
imp := ld.pkgs[ipkg.ID]
wg.Add(1)
go func(imp *loaderPackage) {
ld.loadRecursive(imp)
wg.Done()
}(imp)
}
wg.Wait()
ld.loadPackage(lpkg)
})
}
// loadPackage loads the specified package.
// It must be called only once per Package,
// after immediate dependencies are loaded.
// Precondition: ld.Mode >= LoadTypes.
func (ld *loader) loadPackage(lpkg *loaderPackage) {
if lpkg.PkgPath == "unsafe" {
// Fill in the blanks to avoid surprises.
lpkg.Types = types.Unsafe
lpkg.Fset = ld.Fset
lpkg.Syntax = []*ast.File{}
lpkg.TypesInfo = new(types.Info)
return
}
// Call NewPackage directly with explicit name.
// This avoids skew between golist and go/types when the files'
// package declarations are inconsistent.
lpkg.Types = types.NewPackage(lpkg.PkgPath, lpkg.Name)
lpkg.Fset = ld.Fset
// Subtle: we populate all Types fields with an empty Package
// before loading export data so that export data processing
// never has to create a types.Package for an indirect dependency,
// which would then require that such created packages be explicitly
// inserted back into the Import graph as a final step after export data loading.
// The Diamond test exercises this case.
if !lpkg.needtypes {
return
}
if !lpkg.needsrc {
ld.loadFromExportData(lpkg)
return // not a source package, don't get syntax trees
}
appendError := func(err error) {
// Convert various error types into the one true Error.
var errs []Error
switch err := err.(type) {
case Error:
// from driver
errs = append(errs, err)
case *os.PathError:
// from parser
errs = append(errs, Error{
Pos: err.Path + ":1",
Msg: err.Err.Error(),
Kind: ParseError,
})
case scanner.ErrorList:
// from parser
for _, err := range err {
errs = append(errs, Error{
Pos: err.Pos.String(),
Msg: err.Msg,
Kind: ParseError,
})
}
case types.Error:
// from type checker
errs = append(errs, Error{
Pos: err.Fset.Position(err.Pos).String(),
Msg: err.Msg,
Kind: TypeError,
})
default:
// unexpected impoverished error from parser?
errs = append(errs, Error{
Pos: "-",
Msg: err.Error(),
Kind: UnknownError,
})
// If you see this error message, please file a bug.
log.Printf("internal error: error %q (%T) without position", err, err)
}
lpkg.Errors = append(lpkg.Errors, errs...)
}
files, errs := ld.parseFiles(lpkg.CompiledGoFiles)
for _, err := range errs {
appendError(err)
}
lpkg.Syntax = files
lpkg.TypesInfo = &types.Info{
Types: make(map[ast.Expr]types.TypeAndValue),
Defs: make(map[*ast.Ident]types.Object),
Uses: make(map[*ast.Ident]types.Object),
Implicits: make(map[ast.Node]types.Object),
Scopes: make(map[ast.Node]*types.Scope),
Selections: make(map[*ast.SelectorExpr]*types.Selection),
}
importer := importerFunc(func(path string) (*types.Package, error) {
if path == "unsafe" {
return types.Unsafe, nil
}
// The imports map is keyed by import path.
ipkg := lpkg.Imports[path]
if ipkg == nil {
if err := lpkg.importErrors[path]; err != nil {
return nil, err
}
// There was skew between the metadata and the
// import declarations, likely due to an edit
// race, or because the ParseFile feature was
// used to supply alternative file contents.
return nil, fmt.Errorf("no metadata for %s", path)
}
if ipkg.Types != nil && ipkg.Types.Complete() {
return ipkg.Types, nil
}
log.Fatalf("internal error: nil Pkg importing %q from %q", path, lpkg)
panic("unreachable")
})
// This is only an approximation.
// TODO(adonovan): derive Sizes from the underlying build system.
goarch := runtime.GOARCH
const goarchPrefix = "GOARCH="
for _, e := range ld.Config.Env {
if strings.HasPrefix(e, goarchPrefix) {
goarch = e[len(goarchPrefix):]
}
}
sizes := types.SizesFor("gc", goarch)
// type-check
tc := &types.Config{
Importer: importer,
// Type-check bodies of functions only in non-initial packages.
// Example: for import graph A->B->C and initial packages {A,C},
// we can ignore function bodies in B.
IgnoreFuncBodies: ld.Mode < LoadAllSyntax && !lpkg.initial,
Error: appendError,
Sizes: sizes,
}
types.NewChecker(tc, ld.Fset, lpkg.Types, lpkg.TypesInfo).Files(lpkg.Syntax)
lpkg.importErrors = nil // no longer needed
// If !Cgo, the type-checker uses FakeImportC mode, so
// it doesn't invoke the importer for import "C",
// nor report an error for the import,
// or for any undefined C.f reference.
// We must detect this explicitly and correctly
// mark the package as IllTyped (by reporting an error).
// TODO(adonovan): if these errors are annoying,
// we could just set IllTyped quietly.
if tc.FakeImportC {
outer:
for _, f := range lpkg.Syntax {
for _, imp := range f.Imports {
if imp.Path.Value == `"C"` {
err := types.Error{Fset: ld.Fset, Pos: imp.Pos(), Msg: `import "C" ignored`}
appendError(err)
break outer
}
}
}
}
// Record accumulated errors.
illTyped := len(lpkg.Errors) > 0
if !illTyped {
for _, imp := range lpkg.Imports {
if imp.IllTyped {
illTyped = true
break
}
}
}
lpkg.IllTyped = illTyped
}
// An importerFunc is an implementation of the single-method
// types.Importer interface based on a function value.
type importerFunc func(path string) (*types.Package, error)
func (f importerFunc) Import(path string) (*types.Package, error) { return f(path) }
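// exampleImporter is a minimal sketch of the adapter idiom above: any
// func(string) (*types.Package, error) can serve as a types.Importer.
// The deps map is a hypothetical caller-supplied cache, not part of this API.
func exampleImporter(deps map[string]*types.Package) types.Importer {
	return importerFunc(func(path string) (*types.Package, error) {
		if p, ok := deps[path]; ok {
			return p, nil
		}
		return nil, fmt.Errorf("unknown import %q", path)
	})
}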
// We use a counting semaphore to limit
// the number of parallel I/O calls per process.
var ioLimit = make(chan bool, 20)
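// withIOLimit is a minimal sketch of the counting-semaphore idiom used with
// ioLimit: acquire a slot before I/O, release it after. doIO is a
// hypothetical caller-supplied operation.
func withIOLimit(doIO func()) {
	ioLimit <- true              // acquire one of the 20 slots
	defer func() { <-ioLimit }() // release on return
	doIO()
}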
// parseFiles reads and parses the Go source files and returns the ASTs
// of the ones that could be at least partially parsed, along with a
// list of I/O and parse errors encountered.
//
// Because files are scanned in parallel, the token.Pos
// positions of the resulting ast.Files are not ordered.
//
func (ld *loader) parseFiles(filenames []string) ([]*ast.File, []error) {
var wg sync.WaitGroup
n := len(filenames)
parsed := make([]*ast.File, n)
errors := make([]error, n)
for i, file := range filenames {
wg.Add(1)
go func(i int, filename string) {
ioLimit <- true // wait
// ParseFile may return both an AST and an error.
var src []byte
for f, contents := range ld.Config.Overlay {
if sameFile(f, filename) {
src = contents
}
}
var err error
if src == nil {
src, err = ioutil.ReadFile(filename)
}
if err != nil {
parsed[i], errors[i] = nil, err
} else {
parsed[i], errors[i] = ld.ParseFile(ld.Fset, filename, src)
}
<-ioLimit // signal
wg.Done()
}(i, file)
}
wg.Wait()
// Eliminate nils, preserving order.
var o int
for _, f := range parsed {
if f != nil {
parsed[o] = f
o++
}
}
parsed = parsed[:o]
o = 0
for _, err := range errors {
if err != nil {
errors[o] = err
o++
}
}
errors = errors[:o]
return parsed, errors
}
// sameFile returns true if x and y have the same basename and denote
// the same file.
//
func sameFile(x, y string) bool {
if filepath.Base(x) == filepath.Base(y) { // (optimisation)
if xi, err := os.Stat(x); err == nil {
if yi, err := os.Stat(y); err == nil {
return os.SameFile(xi, yi)
}
}
}
return false
}
// loadFromExportData returns type information for the specified
// package, loading it from an export data file on the first request.
func (ld *loader) loadFromExportData(lpkg *loaderPackage) (*types.Package, error) {
if lpkg.PkgPath == "" {
log.Fatalf("internal error: Package %s has no PkgPath", lpkg)
}
// Because gcexportdata.Read has the potential to create or
// modify the types.Package for each node in the transitive
// closure of dependencies of lpkg, all exportdata operations
// must be sequential. (Finer-grained locking would require
// changes to the gcexportdata API.)
//
// The exportMu lock guards the Package.Pkg field and the
// types.Package it points to, for each Package in the graph.
//
// Not all accesses to Package.Pkg need to be protected by exportMu:
// graph ordering ensures that direct dependencies of source
// packages are fully loaded before the importer reads their Pkg field.
ld.exportMu.Lock()
defer ld.exportMu.Unlock()
if tpkg := lpkg.Types; tpkg != nil && tpkg.Complete() {
return tpkg, nil // cache hit
}
lpkg.IllTyped = true // fail safe
if lpkg.ExportFile == "" {
// Errors while building export data will have been printed to stderr.
return nil, fmt.Errorf("no export data file")
}
f, err := os.Open(lpkg.ExportFile)
if err != nil {
return nil, err
}
defer f.Close()
// Read gc export data.
//
// We don't currently support gccgo export data because all
// underlying workspaces use the gc toolchain. (Even build
// systems that support gccgo don't use it for workspace
// queries.)
r, err := gcexportdata.NewReader(f)
if err != nil {
return nil, fmt.Errorf("reading %s: %v", lpkg.ExportFile, err)
}
// Build the view.
//
// The gcexportdata machinery has no concept of package ID.
// It identifies packages by their PkgPath, which although not
// globally unique is unique within the scope of one invocation
// of the linker, type-checker, or gcexportdata.
//
// So, we must build a PkgPath-keyed view of the global
// (conceptually ID-keyed) cache of packages and pass it to
// gcexportdata. The view must contain every existing
// package that might possibly be mentioned by the
// current package---its transitive closure.
//
// In loadPackage, we unconditionally create a types.Package for
// each dependency so that export data loading does not
// create new ones.
//
// TODO(adonovan): it would be simpler and more efficient
// if the export data machinery invoked a callback to
// get-or-create a package instead of a map.
//
view := make(map[string]*types.Package) // view seen by gcexportdata
seen := make(map[*loaderPackage]bool) // all visited packages
var visit func(pkgs map[string]*Package)
visit = func(pkgs map[string]*Package) {
for _, p := range pkgs {
lpkg := ld.pkgs[p.ID]
if !seen[lpkg] {
seen[lpkg] = true
view[lpkg.PkgPath] = lpkg.Types
visit(lpkg.Imports)
}
}
}
visit(lpkg.Imports)
viewLen := len(view) + 1 // adding the self package
// Parse the export data.
// (May modify incomplete packages in view but not create new ones.)
tpkg, err := gcexportdata.Read(r, ld.Fset, view, lpkg.PkgPath)
if err != nil {
return nil, fmt.Errorf("reading %s: %v", lpkg.ExportFile, err)
}
if viewLen != len(view) {
log.Fatalf("Unexpected package creation during export data loading")
}
lpkg.Types = tpkg
lpkg.IllTyped = false
return tpkg, nil
}
func usesExportData(cfg *Config) bool {
return LoadTypes <= cfg.Mode && cfg.Mode < LoadAllSyntax
}
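// exampleLoad is a minimal usage sketch, assuming this package's exported
// entry point Load(cfg *Config, patterns ...string) ([]*Package, error),
// which drives newLoader and refine as shown above.
func exampleLoad() ([]*Package, error) {
	cfg := &Config{Mode: LoadSyntax} // parse and type-check the root packages
	return Load(cfg, "./...")
}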

View file

@ -1,55 +0,0 @@
package packages
import (
"fmt"
"os"
"sort"
)
// Visit visits all the packages in the import graph whose roots are
// pkgs, calling the optional pre function the first time each package
// is encountered (preorder), and the optional post function after a
// package's dependencies have been visited (postorder).
// The boolean result of pre(pkg) determines whether
// the imports of package pkg are visited.
func Visit(pkgs []*Package, pre func(*Package) bool, post func(*Package)) {
seen := make(map[*Package]bool)
var visit func(*Package)
visit = func(pkg *Package) {
if !seen[pkg] {
seen[pkg] = true
if pre == nil || pre(pkg) {
paths := make([]string, 0, len(pkg.Imports))
for path := range pkg.Imports {
paths = append(paths, path)
}
sort.Strings(paths) // for determinism
for _, path := range paths {
visit(pkg.Imports[path])
}
}
if post != nil {
post(pkg)
}
}
}
for _, pkg := range pkgs {
visit(pkg)
}
}
// PrintErrors prints to os.Stderr the accumulated errors of all
// packages in the import graph rooted at pkgs, dependencies first.
// PrintErrors returns the number of errors printed.
func PrintErrors(pkgs []*Package) int {
var n int
Visit(pkgs, nil, func(pkg *Package) {
for _, err := range pkg.Errors {
fmt.Fprintln(os.Stderr, err)
n++
}
})
return n
}
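// exampleCount is a minimal sketch of Visit: the preorder callback returns
// true, so every reachable package (each visited exactly once) is counted.
func exampleCount(pkgs []*Package) int {
	n := 0
	Visit(pkgs, func(_ *Package) bool { n++; return true }, nil)
	return n
}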

View file

@ -1,46 +0,0 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package typeutil
import (
"go/ast"
"go/types"
"golang.org/x/tools/go/ast/astutil"
)
// Callee returns the named target of a function call, if any:
// a function, method, builtin, or variable.
func Callee(info *types.Info, call *ast.CallExpr) types.Object {
var obj types.Object
switch fun := astutil.Unparen(call.Fun).(type) {
case *ast.Ident:
obj = info.Uses[fun] // type, var, builtin, or declared func
case *ast.SelectorExpr:
if sel, ok := info.Selections[fun]; ok {
obj = sel.Obj() // method or field
} else {
obj = info.Uses[fun.Sel] // qualified identifier?
}
}
if _, ok := obj.(*types.TypeName); ok {
return nil // T(x) is a conversion, not a call
}
return obj
}
// StaticCallee returns the target (function or method) of a static
// function call, if any. It returns nil for calls to builtins.
func StaticCallee(info *types.Info, call *ast.CallExpr) *types.Func {
if f, ok := Callee(info, call).(*types.Func); ok && !interfaceMethod(f) {
return f
}
return nil
}
func interfaceMethod(f *types.Func) bool {
recv := f.Type().(*types.Signature).Recv()
return recv != nil && types.IsInterface(recv.Type())
}
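// exampleTarget is a minimal sketch: resolve a call expression to its static
// target, if any. info is assumed to have Uses and Selections populated for
// the file containing call.
func exampleTarget(info *types.Info, call *ast.CallExpr) string {
	if fn := StaticCallee(info, call); fn != nil {
		return fn.FullName() // e.g. "(*bytes.Buffer).Write"
	}
	return "dynamic, builtin, or conversion"
}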

View file

@ -1,31 +0,0 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package typeutil
import "go/types"
// Dependencies returns all dependencies of the specified packages.
//
// Dependent packages appear in topological order: if package P imports
// package Q, Q appears earlier than P in the result.
// The algorithm follows import statements in the order they
// appear in the source code, so the result is a total order.
//
func Dependencies(pkgs ...*types.Package) []*types.Package {
var result []*types.Package
seen := make(map[*types.Package]bool)
var visit func(pkgs []*types.Package)
visit = func(pkgs []*types.Package) {
for _, p := range pkgs {
if !seen[p] {
seen[p] = true
visit(p.Imports())
result = append(result, p)
}
}
}
visit(pkgs)
return result
}
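// exampleDeps is a minimal sketch: list transitive dependencies in
// topological order ("fmt" is assumed as an extra import here).
func exampleDeps(pkg *types.Package) {
	for _, p := range Dependencies(pkg) {
		fmt.Println(p.Path()) // imported packages print before their importers
	}
}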

View file

@ -1,313 +0,0 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package typeutil defines various utilities for types, such as Map,
// a mapping from types.Type to interface{} values.
package typeutil // import "golang.org/x/tools/go/types/typeutil"
import (
"bytes"
"fmt"
"go/types"
"reflect"
)
// Map is a hash-table-based mapping from types (types.Type) to
// arbitrary interface{} values. The concrete types that implement
// the Type interface are pointers. Since they are not canonicalized,
// == cannot be used to check for equivalence, and thus we cannot
// simply use a Go map.
//
// Just as with map[K]V, a nil *Map is a valid empty map.
//
// Not thread-safe.
//
type Map struct {
hasher Hasher // shared by many Maps
table map[uint32][]entry // maps hash to bucket; entry.key==nil means unused
length int // number of map entries
}
// entry is an entry (key/value association) in a hash bucket.
type entry struct {
key types.Type
value interface{}
}
// SetHasher sets the hasher used by Map.
//
// All Hashers are functionally equivalent but contain internal state
// used to cache the results of hashing previously seen types.
//
// A single Hasher created by MakeHasher() may be shared among many
// Maps. This is recommended if the instances have many keys in
// common, as it will amortize the cost of hash computation.
//
// A Hasher may grow without bound as new types are seen. Even when a
// type is deleted from the map, the Hasher never shrinks, since other
// types in the map may reference the deleted type indirectly.
//
// Hashers are not thread-safe, and read-only operations such as
// Map.Lookup require updates to the hasher, so a full Mutex lock (not a
// read-lock) is required around all Map operations if a shared
// hasher is accessed from multiple threads.
//
// If SetHasher is not called, the Map will create a private hasher at
// the first call to Insert.
//
func (m *Map) SetHasher(hasher Hasher) {
m.hasher = hasher
}
// Delete removes the entry with the given key, if any.
// It returns true if the entry was found.
//
func (m *Map) Delete(key types.Type) bool {
if m != nil && m.table != nil {
hash := m.hasher.Hash(key)
bucket := m.table[hash]
for i, e := range bucket {
if e.key != nil && types.Identical(key, e.key) {
// We can't compact the bucket as it
// would disturb iterators.
bucket[i] = entry{}
m.length--
return true
}
}
}
return false
}
// At returns the map entry for the given key.
// The result is nil if the entry is not present.
//
func (m *Map) At(key types.Type) interface{} {
if m != nil && m.table != nil {
for _, e := range m.table[m.hasher.Hash(key)] {
if e.key != nil && types.Identical(key, e.key) {
return e.value
}
}
}
return nil
}
// Set sets the map entry for key to val,
// and returns the previous entry, if any.
func (m *Map) Set(key types.Type, value interface{}) (prev interface{}) {
if m.table != nil {
hash := m.hasher.Hash(key)
bucket := m.table[hash]
var hole *entry
for i, e := range bucket {
if e.key == nil {
hole = &bucket[i]
} else if types.Identical(key, e.key) {
prev = e.value
bucket[i].value = value
return
}
}
if hole != nil {
*hole = entry{key, value} // overwrite deleted entry
} else {
m.table[hash] = append(bucket, entry{key, value})
}
} else {
if m.hasher.memo == nil {
m.hasher = MakeHasher()
}
hash := m.hasher.Hash(key)
m.table = map[uint32][]entry{hash: {entry{key, value}}}
}
m.length++
return
}
// Len returns the number of map entries.
func (m *Map) Len() int {
if m != nil {
return m.length
}
return 0
}
// Iterate calls function f on each entry in the map in unspecified order.
//
// If f should mutate the map, Iterate provides the same guarantees as
// Go maps: if f deletes a map entry that Iterate has not yet reached,
// f will not be invoked for it, but if f inserts a map entry that
// Iterate has not yet reached, whether or not f will be invoked for
// it is unspecified.
//
func (m *Map) Iterate(f func(key types.Type, value interface{})) {
if m != nil {
for _, bucket := range m.table {
for _, e := range bucket {
if e.key != nil {
f(e.key, e.value)
}
}
}
}
}
// Keys returns a new slice containing the set of map keys.
// The order is unspecified.
func (m *Map) Keys() []types.Type {
keys := make([]types.Type, 0, m.Len())
m.Iterate(func(key types.Type, _ interface{}) {
keys = append(keys, key)
})
return keys
}
func (m *Map) toString(values bool) string {
if m == nil {
return "{}"
}
var buf bytes.Buffer
fmt.Fprint(&buf, "{")
sep := ""
m.Iterate(func(key types.Type, value interface{}) {
fmt.Fprint(&buf, sep)
sep = ", "
fmt.Fprint(&buf, key)
if values {
fmt.Fprintf(&buf, ": %q", value)
}
})
fmt.Fprint(&buf, "}")
return buf.String()
}
// String returns a string representation of the map's entries.
// Values are printed using fmt.Sprintf("%v", v).
// Order is unspecified.
//
func (m *Map) String() string {
return m.toString(true)
}
// KeysString returns a string representation of the map's key set.
// Order is unspecified.
//
func (m *Map) KeysString() string {
return m.toString(false)
}
////////////////////////////////////////////////////////////////////////
// Hasher
// A Hasher maps each type to its hash value.
// For efficiency, a hasher uses memoization; thus its memory
// footprint grows monotonically over time.
// Hashers are not thread-safe.
// Hashers have reference semantics.
// Call MakeHasher to create a Hasher.
type Hasher struct {
memo map[types.Type]uint32
}
// MakeHasher returns a new Hasher instance.
func MakeHasher() Hasher {
return Hasher{make(map[types.Type]uint32)}
}
// Hash computes a hash value for the given type t such that
// Identical(t, t') => Hash(t) == Hash(t').
func (h Hasher) Hash(t types.Type) uint32 {
hash, ok := h.memo[t]
if !ok {
hash = h.hashFor(t)
h.memo[t] = hash
}
return hash
}
// hashString computes the Fowler-Noll-Vo hash of s.
func hashString(s string) uint32 {
var h uint32
for i := 0; i < len(s); i++ {
h ^= uint32(s[i])
h *= 16777619
}
return h
}
// hashFor computes the hash of t.
func (h Hasher) hashFor(t types.Type) uint32 {
// See Identical for rationale.
switch t := t.(type) {
case *types.Basic:
return uint32(t.Kind())
case *types.Array:
return 9043 + 2*uint32(t.Len()) + 3*h.Hash(t.Elem())
case *types.Slice:
return 9049 + 2*h.Hash(t.Elem())
case *types.Struct:
var hash uint32 = 9059
for i, n := 0, t.NumFields(); i < n; i++ {
f := t.Field(i)
if f.Anonymous() {
hash += 8861
}
hash += hashString(t.Tag(i))
hash += hashString(f.Name()) // (ignore f.Pkg)
hash += h.Hash(f.Type())
}
return hash
case *types.Pointer:
return 9067 + 2*h.Hash(t.Elem())
case *types.Signature:
var hash uint32 = 9091
if t.Variadic() {
hash *= 8863
}
return hash + 3*h.hashTuple(t.Params()) + 5*h.hashTuple(t.Results())
case *types.Interface:
var hash uint32 = 9103
for i, n := 0, t.NumMethods(); i < n; i++ {
// See go/types.identicalMethods for rationale.
// Method order is not significant.
// Ignore m.Pkg().
m := t.Method(i)
hash += 3*hashString(m.Name()) + 5*h.Hash(m.Type())
}
return hash
case *types.Map:
return 9109 + 2*h.Hash(t.Key()) + 3*h.Hash(t.Elem())
case *types.Chan:
return 9127 + 2*uint32(t.Dir()) + 3*h.Hash(t.Elem())
case *types.Named:
// Not safe with a copying GC; objects may move.
return uint32(reflect.ValueOf(t.Obj()).Pointer())
case *types.Tuple:
return h.hashTuple(t)
}
panic(t)
}
func (h Hasher) hashTuple(tuple *types.Tuple) uint32 {
// See go/types.identicalTypes for rationale.
n := tuple.Len()
var hash uint32 = 9137 + 2*uint32(n)
for i := 0; i < n; i++ {
hash += 3 * h.Hash(tuple.At(i).Type())
}
return hash
}
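// exampleMap is a minimal usage sketch: keys are compared with
// types.Identical, so structurally identical (but distinct) Type values
// share a single entry.
func exampleMap(t1, t2 types.Type) int {
	var m Map // the zero value is a valid empty map
	m.Set(t1, "first")
	m.Set(t2, "second") // overwrites the first entry if Identical(t1, t2)
	return m.Len()
}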

View file

@ -1,72 +0,0 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This file implements a cache of method sets.
package typeutil
import (
"go/types"
"sync"
)
// A MethodSetCache records the method set of each type T for which
// MethodSet(T) is called so that repeat queries are fast.
// The zero value is a ready-to-use cache instance.
type MethodSetCache struct {
mu sync.Mutex
named map[*types.Named]struct{ value, pointer *types.MethodSet } // method sets for named N and *N
others map[types.Type]*types.MethodSet // all other types
}
// MethodSet returns the method set of type T. It is thread-safe.
//
// If cache is nil, this function is equivalent to types.NewMethodSet(T).
// Utility functions can thus expose an optional *MethodSetCache
// parameter to clients that care about performance.
//
func (cache *MethodSetCache) MethodSet(T types.Type) *types.MethodSet {
if cache == nil {
return types.NewMethodSet(T)
}
cache.mu.Lock()
defer cache.mu.Unlock()
switch T := T.(type) {
case *types.Named:
return cache.lookupNamed(T).value
case *types.Pointer:
if N, ok := T.Elem().(*types.Named); ok {
return cache.lookupNamed(N).pointer
}
}
// all other types
// (The map uses pointer equivalence, not type identity.)
mset := cache.others[T]
if mset == nil {
mset = types.NewMethodSet(T)
if cache.others == nil {
cache.others = make(map[types.Type]*types.MethodSet)
}
cache.others[T] = mset
}
return mset
}
func (cache *MethodSetCache) lookupNamed(named *types.Named) struct{ value, pointer *types.MethodSet } {
if cache.named == nil {
cache.named = make(map[*types.Named]struct{ value, pointer *types.MethodSet })
}
// Avoid recomputing mset(*T) for each distinct Pointer
// instance whose underlying type is a named type.
msets, ok := cache.named[named]
if !ok {
msets.value = types.NewMethodSet(named)
msets.pointer = types.NewMethodSet(types.NewPointer(named))
cache.named[named] = msets
}
return msets
}
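// exampleMethodSets is a minimal sketch: one shared cache makes repeated
// method-set queries cheap.
func exampleMethodSets(cache *MethodSetCache, ts []types.Type) int {
	total := 0
	for _, T := range ts {
		total += cache.MethodSet(T).Len() // computed once per distinct type
	}
	return total
}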

View file

@ -1,52 +0,0 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package typeutil
// This file defines utilities for user interfaces that display types.
import "go/types"
// IntuitiveMethodSet returns the intuitive method set of a type T,
// which is the set of methods you can call on an addressable value of
// that type.
//
// The result always contains MethodSet(T), and is exactly MethodSet(T)
// for interface types and for pointer-to-concrete types.
// For all other concrete types T, the result additionally
// contains each method belonging to *T if there is no identically
// named method on T itself.
//
// This corresponds to user intuition about method sets;
// this function is intended only for user interfaces.
//
// The order of the result is as for types.MethodSet(T).
//
func IntuitiveMethodSet(T types.Type, msets *MethodSetCache) []*types.Selection {
isPointerToConcrete := func(T types.Type) bool {
ptr, ok := T.(*types.Pointer)
return ok && !types.IsInterface(ptr.Elem())
}
var result []*types.Selection
mset := msets.MethodSet(T)
if types.IsInterface(T) || isPointerToConcrete(T) {
for i, n := 0, mset.Len(); i < n; i++ {
result = append(result, mset.At(i))
}
} else {
// T is some other concrete type.
// Report methods of T and *T, preferring those of T.
pmset := msets.MethodSet(types.NewPointer(T))
for i, n := 0, pmset.Len(); i < n; i++ {
meth := pmset.At(i)
if m := mset.Lookup(meth.Obj().Pkg(), meth.Obj().Name()); m != nil {
meth = m
}
result = append(result, meth)
}
}
return result
}

View file

@ -1,196 +0,0 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package fastwalk provides a faster version of filepath.Walk for file system
// scanning tools.
package fastwalk
import (
"errors"
"os"
"path/filepath"
"runtime"
"sync"
)
// TraverseLink is used as a return value from WalkFuncs to indicate that the
// symlink named in the call may be traversed.
var TraverseLink = errors.New("fastwalk: traverse symlink, assuming target is a directory")
// SkipFiles is used as a return value from WalkFuncs to indicate that the
// callback should not be called for any other files in the current directory.
// Child directories will still be traversed.
var SkipFiles = errors.New("fastwalk: skip remaining files in directory")
// Walk is a faster implementation of filepath.Walk.
//
// filepath.Walk's design necessarily calls os.Lstat on each file,
// even if the caller needs less info.
// Many tools need only the type of each file.
// On some platforms, this information is provided directly by the readdir
// system call, avoiding the need to stat each file individually.
// fastwalk_unix.go contains a fork of the syscall routines.
//
// See golang.org/issue/16399
//
// Walk walks the file tree rooted at root, calling walkFn for
// each file or directory in the tree, including root.
//
// If walkFn returns filepath.SkipDir, the directory is skipped.
//
// Unlike filepath.Walk:
// * file stat calls must be done by the user.
// The only provided metadata is the file type, which does not include
// any permission bits.
// * multiple goroutines stat the filesystem concurrently. The provided
// walkFn must be safe for concurrent use.
// * fastWalk can follow symlinks if walkFn returns the TraverseLink
// sentinel error. It is the walkFn's responsibility to prevent
// fastWalk from going into symlink cycles.
func Walk(root string, walkFn func(path string, typ os.FileMode) error) error {
// TODO(bradfitz): make numWorkers configurable? We used a
// minimum of 4 to give the kernel more info about multiple
// things we want, in hopes its I/O scheduling can take
// advantage of that. Hopefully most are in cache. Maybe 4 is
// even too low of a minimum. Profile more.
numWorkers := 4
if n := runtime.NumCPU(); n > numWorkers {
numWorkers = n
}
// Make sure to wait for all workers to finish, otherwise
// walkFn could still be called after returning. This Wait call
// runs after close(e.donec) below.
var wg sync.WaitGroup
defer wg.Wait()
w := &walker{
fn: walkFn,
enqueuec: make(chan walkItem, numWorkers), // buffered for performance
workc: make(chan walkItem, numWorkers), // buffered for performance
donec: make(chan struct{}),
// buffered for correctness & not leaking goroutines:
resc: make(chan error, numWorkers),
}
defer close(w.donec)
for i := 0; i < numWorkers; i++ {
wg.Add(1)
go w.doWork(&wg)
}
todo := []walkItem{{dir: root}}
out := 0
for {
workc := w.workc
var workItem walkItem
if len(todo) == 0 {
workc = nil
} else {
workItem = todo[len(todo)-1]
}
select {
case workc <- workItem:
todo = todo[:len(todo)-1]
out++
case it := <-w.enqueuec:
todo = append(todo, it)
case err := <-w.resc:
out--
if err != nil {
return err
}
if out == 0 && len(todo) == 0 {
// It's safe to quit here, as long as the buffered
// enqueue channel isn't also readable, which might
// happen if the worker sends both another unit of
// work and its result before the other select was
// scheduled and both w.resc and w.enqueuec were
// readable.
select {
case it := <-w.enqueuec:
todo = append(todo, it)
default:
return nil
}
}
}
}
}
// doWork reads directories as instructed (via workc) and runs the
// user's callback function.
func (w *walker) doWork(wg *sync.WaitGroup) {
defer wg.Done()
for {
select {
case <-w.donec:
return
case it := <-w.workc:
select {
case <-w.donec:
return
case w.resc <- w.walk(it.dir, !it.callbackDone):
}
}
}
}
type walker struct {
fn func(path string, typ os.FileMode) error
donec chan struct{} // closed on fastWalk's return
workc chan walkItem // to workers
enqueuec chan walkItem // from workers
resc chan error // from workers
}
type walkItem struct {
dir string
callbackDone bool // callback already called; don't do it again
}
func (w *walker) enqueue(it walkItem) {
select {
case w.enqueuec <- it:
case <-w.donec:
}
}
func (w *walker) onDirEnt(dirName, baseName string, typ os.FileMode) error {
joined := dirName + string(os.PathSeparator) + baseName
if typ == os.ModeDir {
w.enqueue(walkItem{dir: joined})
return nil
}
err := w.fn(joined, typ)
if typ == os.ModeSymlink {
if err == TraverseLink {
// Set callbackDone so we don't call it twice for both the
// symlink-as-symlink and the symlink-as-directory later:
w.enqueue(walkItem{dir: joined, callbackDone: true})
return nil
}
if err == filepath.SkipDir {
// Permit SkipDir on symlinks too.
return nil
}
}
return err
}
func (w *walker) walk(root string, runUserCallback bool) error {
if runUserCallback {
err := w.fn(root, os.ModeDir)
if err == filepath.SkipDir {
return nil
}
if err != nil {
return err
}
}
return readDir(root, w.onDirEnt)
}
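// exampleWalk is a minimal usage sketch: visit every regular .go file under
// root. The callback must be safe for concurrent use, as documented above.
func exampleWalk(root string) error {
	return Walk(root, func(path string, typ os.FileMode) error {
		if typ.IsRegular() && filepath.Ext(path) == ".go" {
			println(path) // the built-in println suffices for a sketch
		}
		return nil
	})
}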

View file

@ -1,13 +0,0 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build freebsd openbsd netbsd
package fastwalk
import "syscall"
func direntInode(dirent *syscall.Dirent) uint64 {
return uint64(dirent.Fileno)
}

View file

@ -1,14 +0,0 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build linux darwin
// +build !appengine
package fastwalk
import "syscall"
func direntInode(dirent *syscall.Dirent) uint64 {
return uint64(dirent.Ino)
}

View file

@ -1,13 +0,0 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build darwin freebsd openbsd netbsd
package fastwalk
import "syscall"
func direntNamlen(dirent *syscall.Dirent) uint64 {
return uint64(dirent.Namlen)
}

View file

@ -1,24 +0,0 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build linux
// +build !appengine
package fastwalk
import (
"bytes"
"syscall"
"unsafe"
)
func direntNamlen(dirent *syscall.Dirent) uint64 {
const fixedHdr = uint16(unsafe.Offsetof(syscall.Dirent{}.Name))
nameBuf := (*[unsafe.Sizeof(dirent.Name)]byte)(unsafe.Pointer(&dirent.Name[0]))
nameLen := bytes.IndexByte(nameBuf[:dirent.Reclen-fixedHdr], 0)
if nameLen < 0 {
panic("failed to find terminating 0 byte in dirent")
}
return uint64(nameLen)
}

View file

@ -1,37 +0,0 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build appengine !linux,!darwin,!freebsd,!openbsd,!netbsd
package fastwalk
import (
"io/ioutil"
"os"
)
// readDir calls fn for each directory entry in dirName.
// It does not descend into directories or follow symlinks.
// If fn returns a non-nil error, readDir returns with that error
// immediately.
func readDir(dirName string, fn func(dirName, entName string, typ os.FileMode) error) error {
fis, err := ioutil.ReadDir(dirName)
if err != nil {
return err
}
skipFiles := false
for _, fi := range fis {
if fi.Mode().IsRegular() && skipFiles {
continue
}
if err := fn(dirName, fi.Name(), fi.Mode()&os.ModeType); err != nil {
if err == SkipFiles {
skipFiles = true
continue
}
return err
}
}
return nil
}

View file

@ -1,127 +0,0 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build linux darwin freebsd openbsd netbsd
// +build !appengine
package fastwalk
import (
"fmt"
"os"
"syscall"
"unsafe"
)
const blockSize = 8 << 10
// unknownFileMode is a sentinel (and bogus) os.FileMode
// value used to represent a syscall.DT_UNKNOWN Dirent.Type.
const unknownFileMode os.FileMode = os.ModeNamedPipe | os.ModeSocket | os.ModeDevice
func readDir(dirName string, fn func(dirName, entName string, typ os.FileMode) error) error {
fd, err := syscall.Open(dirName, 0, 0)
if err != nil {
return &os.PathError{Op: "open", Path: dirName, Err: err}
}
defer syscall.Close(fd)
// The buffer must be at least a block long.
buf := make([]byte, blockSize) // stack-allocated; doesn't escape
bufp := 0 // starting read position in buf
nbuf := 0 // end valid data in buf
skipFiles := false
for {
if bufp >= nbuf {
bufp = 0
nbuf, err = syscall.ReadDirent(fd, buf)
if err != nil {
return os.NewSyscallError("readdirent", err)
}
if nbuf <= 0 {
return nil
}
}
consumed, name, typ := parseDirEnt(buf[bufp:nbuf])
bufp += consumed
if name == "" || name == "." || name == ".." {
continue
}
// Fallback for filesystems (like old XFS) that don't
// support Dirent.Type and have DT_UNKNOWN (0) there
// instead.
if typ == unknownFileMode {
fi, err := os.Lstat(dirName + "/" + name)
if err != nil {
// It got deleted in the meantime.
if os.IsNotExist(err) {
continue
}
return err
}
typ = fi.Mode() & os.ModeType
}
if skipFiles && typ.IsRegular() {
continue
}
if err := fn(dirName, name, typ); err != nil {
if err == SkipFiles {
skipFiles = true
continue
}
return err
}
}
}
func parseDirEnt(buf []byte) (consumed int, name string, typ os.FileMode) {
// golang.org/issue/15653
dirent := (*syscall.Dirent)(unsafe.Pointer(&buf[0]))
if v := unsafe.Offsetof(dirent.Reclen) + unsafe.Sizeof(dirent.Reclen); uintptr(len(buf)) < v {
panic(fmt.Sprintf("buf size of %d smaller than dirent header size %d", len(buf), v))
}
if len(buf) < int(dirent.Reclen) {
panic(fmt.Sprintf("buf size %d < record length %d", len(buf), dirent.Reclen))
}
consumed = int(dirent.Reclen)
if direntInode(dirent) == 0 { // File absent in directory.
return
}
switch dirent.Type {
case syscall.DT_REG:
typ = 0
case syscall.DT_DIR:
typ = os.ModeDir
case syscall.DT_LNK:
typ = os.ModeSymlink
case syscall.DT_BLK:
typ = os.ModeDevice
case syscall.DT_FIFO:
typ = os.ModeNamedPipe
case syscall.DT_SOCK:
typ = os.ModeSocket
case syscall.DT_UNKNOWN:
typ = unknownFileMode
default:
// Skip weird things.
// It's probably a DT_WHT (http://lwn.net/Articles/325369/)
// or something. Revisit if/when this package is moved outside
// of goimports. goimports only cares about regular files,
// symlinks, and directories.
return
}
nameBuf := (*[unsafe.Sizeof(dirent.Name)]byte)(unsafe.Pointer(&dirent.Name[0]))
nameLen := direntNamlen(dirent)
// Special cases for common things:
if nameLen == 1 && nameBuf[0] == '.' {
name = "."
} else if nameLen == 2 && nameBuf[0] == '.' && nameBuf[1] == '.' {
name = ".."
} else {
name = string(nameBuf[:nameLen])
}
return
}

View file

@ -1,246 +0,0 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package gopathwalk is like filepath.Walk but specialized for finding Go
// packages, particularly in $GOPATH and $GOROOT.
package gopathwalk
import (
"bufio"
"bytes"
"fmt"
"go/build"
"golang.org/x/tools/internal/fastwalk"
"io/ioutil"
"log"
"os"
"path/filepath"
"strings"
)
// Options controls the behavior of a Walk call.
type Options struct {
Debug bool // Enable debug logging
ModulesEnabled bool // Search module caches. Also disables legacy goimports ignore rules.
}
// RootType indicates the type of a Root.
type RootType int
const (
RootUnknown RootType = iota
RootGOROOT
RootGOPATH
RootCurrentModule
RootModuleCache
)
// A Root is a starting point for a Walk.
type Root struct {
Path string
Type RootType
}
// SrcDirsRoots returns the roots from build.Default.SrcDirs(). Not modules-compatible.
func SrcDirsRoots() []Root {
var roots []Root
roots = append(roots, Root{filepath.Join(build.Default.GOROOT, "src"), RootGOROOT})
for _, p := range filepath.SplitList(build.Default.GOPATH) {
roots = append(roots, Root{filepath.Join(p, "src"), RootGOPATH})
}
return roots
}
// Walk walks Go source directories ($GOROOT, $GOPATH, etc) to find packages.
// For each package found, add will be called concurrently with the absolute
// paths of the containing source directory and the package directory.
func Walk(roots []Root, add func(root Root, dir string), opts Options) {
for _, root := range roots {
walkDir(root, add, opts)
}
}
func walkDir(root Root, add func(Root, string), opts Options) {
if _, err := os.Stat(root.Path); os.IsNotExist(err) {
if opts.Debug {
log.Printf("skipping nonexistant directory: %v", root.Path)
}
return
}
if opts.Debug {
log.Printf("scanning %s", root.Path)
}
w := &walker{
root: root,
add: add,
opts: opts,
}
w.init()
if err := fastwalk.Walk(root.Path, w.walk); err != nil {
log.Printf("gopathwalk: scanning directory %v: %v", root.Path, err)
}
if opts.Debug {
log.Printf("scanned %s", root.Path)
}
}
// walker is the callback for fastwalk.Walk.
type walker struct {
root Root // The source directory to scan.
add func(Root, string) // The callback that will be invoked for every possible Go package dir.
opts Options // Options passed to Walk by the user.
ignoredDirs []os.FileInfo // The ignored directories, loaded from .goimportsignore files.
}
// init initializes the walker based on its Options.
func (w *walker) init() {
var ignoredPaths []string
if w.root.Type == RootModuleCache {
ignoredPaths = []string{"cache"}
}
if !w.opts.ModulesEnabled && w.root.Type == RootGOPATH {
ignoredPaths = w.getIgnoredDirs(w.root.Path)
ignoredPaths = append(ignoredPaths, "v", "mod")
}
for _, p := range ignoredPaths {
full := filepath.Join(w.root.Path, p)
if fi, err := os.Stat(full); err == nil {
w.ignoredDirs = append(w.ignoredDirs, fi)
if w.opts.Debug {
log.Printf("Directory added to ignore list: %s", full)
}
} else if w.opts.Debug {
log.Printf("Error statting ignored directory: %v", err)
}
}
}
// getIgnoredDirs reads an optional config file at <path>/.goimportsignore
// listing relative directories to ignore when scanning for go files.
// The provided path is one of the $GOPATH entries with "src" appended.
func (w *walker) getIgnoredDirs(path string) []string {
file := filepath.Join(path, ".goimportsignore")
slurp, err := ioutil.ReadFile(file)
if w.opts.Debug {
if err != nil {
log.Print(err)
} else {
log.Printf("Read %s", file)
}
}
if err != nil {
return nil
}
var ignoredDirs []string
bs := bufio.NewScanner(bytes.NewReader(slurp))
for bs.Scan() {
line := strings.TrimSpace(bs.Text())
if line == "" || strings.HasPrefix(line, "#") {
continue
}
ignoredDirs = append(ignoredDirs, line)
}
return ignoredDirs
}
func (w *walker) shouldSkipDir(fi os.FileInfo) bool {
for _, ignoredDir := range w.ignoredDirs {
if os.SameFile(fi, ignoredDir) {
return true
}
}
return false
}
func (w *walker) walk(path string, typ os.FileMode) error {
dir := filepath.Dir(path)
if typ.IsRegular() {
if dir == w.root.Path {
// Doesn't make sense to have regular files
// directly in your $GOPATH/src or $GOROOT/src.
return fastwalk.SkipFiles
}
if !strings.HasSuffix(path, ".go") {
return nil
}
w.add(w.root, dir)
return fastwalk.SkipFiles
}
if typ == os.ModeDir {
base := filepath.Base(path)
if base == "" || base[0] == '.' || base[0] == '_' ||
base == "testdata" || (!w.opts.ModulesEnabled && base == "node_modules") {
return filepath.SkipDir
}
fi, err := os.Lstat(path)
if err == nil && w.shouldSkipDir(fi) {
return filepath.SkipDir
}
return nil
}
if typ == os.ModeSymlink {
base := filepath.Base(path)
if strings.HasPrefix(base, ".#") {
// Emacs noise.
return nil
}
fi, err := os.Lstat(path)
if err != nil {
// Just ignore it.
return nil
}
if w.shouldTraverse(dir, fi) {
return fastwalk.TraverseLink
}
}
return nil
}
// shouldTraverse reports whether the symlink fi, found in dir,
// should be followed. It makes sure symlinks were never visited
// before to avoid symlink loops.
func (w *walker) shouldTraverse(dir string, fi os.FileInfo) bool {
path := filepath.Join(dir, fi.Name())
target, err := filepath.EvalSymlinks(path)
if err != nil {
return false
}
ts, err := os.Stat(target)
if err != nil {
fmt.Fprintln(os.Stderr, err)
return false
}
if !ts.IsDir() {
return false
}
if w.shouldSkipDir(ts) {
return false
}
// Check for symlink loops by statting each directory component
// and seeing if any are the same file as ts.
for {
parent := filepath.Dir(path)
if parent == path {
// Made it to the root without seeing a cycle.
// Use this symlink.
return true
}
parentInfo, err := os.Stat(parent)
if err != nil {
return false
}
if os.SameFile(ts, parentInfo) {
// Cycle. Don't traverse.
return false
}
path = parent
}
}
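// exampleScan is a minimal usage sketch. Because add is invoked
// concurrently, the slice is guarded by a mutex ("sync" is assumed as an
// extra import).
func exampleScan() []string {
	var (
		mu   sync.Mutex
		dirs []string
	)
	Walk(SrcDirsRoots(), func(_ Root, dir string) {
		mu.Lock()
		dirs = append(dirs, dir)
		mu.Unlock()
	}, Options{})
	return dirs
}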

View file

@ -1,388 +0,0 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package semver implements comparison of semantic version strings.
// In this package, semantic version strings must begin with a leading "v",
// as in "v1.0.0".
//
// The general form of a semantic version string accepted by this package is
//
// vMAJOR[.MINOR[.PATCH[-PRERELEASE][+BUILD]]]
//
// where square brackets indicate optional parts of the syntax;
// MAJOR, MINOR, and PATCH are decimal integers without extra leading zeros;
// PRERELEASE and BUILD are each a series of non-empty dot-separated identifiers
// using only alphanumeric characters and hyphens; and
// all-numeric PRERELEASE identifiers must not have leading zeros.
//
// This package follows Semantic Versioning 2.0.0 (see semver.org)
// with two exceptions. First, it requires the "v" prefix. Second, it recognizes
// vMAJOR and vMAJOR.MINOR (with no prerelease or build suffixes)
// as shorthands for vMAJOR.0.0 and vMAJOR.MINOR.0.
package semver
// parsed returns the parsed form of a semantic version string.
type parsed struct {
major string
minor string
patch string
short string
prerelease string
build string
err string
}
// IsValid reports whether v is a valid semantic version string.
func IsValid(v string) bool {
_, ok := parse(v)
return ok
}
// Canonical returns the canonical formatting of the semantic version v.
// It fills in any missing .MINOR or .PATCH and discards build metadata.
// Two semantic versions compare equal only if their canonical formattings
// are identical strings.
// The canonical invalid semantic version is the empty string.
func Canonical(v string) string {
p, ok := parse(v)
if !ok {
return ""
}
if p.build != "" {
return v[:len(v)-len(p.build)]
}
if p.short != "" {
return v + p.short
}
return v
}
// Major returns the major version prefix of the semantic version v.
// For example, Major("v2.1.0") == "v2".
// If v is an invalid semantic version string, Major returns the empty string.
func Major(v string) string {
pv, ok := parse(v)
if !ok {
return ""
}
return v[:1+len(pv.major)]
}
// MajorMinor returns the major.minor version prefix of the semantic version v.
// For example, MajorMinor("v2.1.0") == "v2.1".
// If v is an invalid semantic version string, MajorMinor returns the empty string.
func MajorMinor(v string) string {
pv, ok := parse(v)
if !ok {
return ""
}
i := 1 + len(pv.major)
if j := i + 1 + len(pv.minor); j <= len(v) && v[i] == '.' && v[i+1:j] == pv.minor {
return v[:j]
}
return v[:i] + "." + pv.minor
}
// Prerelease returns the prerelease suffix of the semantic version v.
// For example, Prerelease("v2.1.0-pre+meta") == "-pre".
// If v is an invalid semantic version string, Prerelease returns the empty string.
func Prerelease(v string) string {
pv, ok := parse(v)
if !ok {
return ""
}
return pv.prerelease
}
// Build returns the build suffix of the semantic version v.
// For example, Build("v2.1.0+meta") == "+meta".
// If v is an invalid semantic version string, Build returns the empty string.
func Build(v string) string {
pv, ok := parse(v)
if !ok {
return ""
}
return pv.build
}
// Compare returns an integer comparing two versions according to
// semantic version precedence.
// The result will be 0 if v == w, -1 if v < w, or +1 if v > w.
//
// An invalid semantic version string is considered less than a valid one.
// All invalid semantic version strings compare equal to each other.
func Compare(v, w string) int {
pv, ok1 := parse(v)
pw, ok2 := parse(w)
if !ok1 && !ok2 {
return 0
}
if !ok1 {
return -1
}
if !ok2 {
return +1
}
if c := compareInt(pv.major, pw.major); c != 0 {
return c
}
if c := compareInt(pv.minor, pw.minor); c != 0 {
return c
}
if c := compareInt(pv.patch, pw.patch); c != 0 {
return c
}
return comparePrerelease(pv.prerelease, pw.prerelease)
}
// Max canonicalizes its arguments and then returns the version string
// that compares greater.
func Max(v, w string) string {
v = Canonical(v)
w = Canonical(w)
if Compare(v, w) > 0 {
return v
}
return w
}
func parse(v string) (p parsed, ok bool) {
if v == "" || v[0] != 'v' {
p.err = "missing v prefix"
return
}
p.major, v, ok = parseInt(v[1:])
if !ok {
p.err = "bad major version"
return
}
if v == "" {
p.minor = "0"
p.patch = "0"
p.short = ".0.0"
return
}
if v[0] != '.' {
p.err = "bad minor prefix"
ok = false
return
}
p.minor, v, ok = parseInt(v[1:])
if !ok {
p.err = "bad minor version"
return
}
if v == "" {
p.patch = "0"
p.short = ".0"
return
}
if v[0] != '.' {
p.err = "bad patch prefix"
ok = false
return
}
p.patch, v, ok = parseInt(v[1:])
if !ok {
p.err = "bad patch version"
return
}
if len(v) > 0 && v[0] == '-' {
p.prerelease, v, ok = parsePrerelease(v)
if !ok {
p.err = "bad prerelease"
return
}
}
if len(v) > 0 && v[0] == '+' {
p.build, v, ok = parseBuild(v)
if !ok {
p.err = "bad build"
return
}
}
if v != "" {
p.err = "junk on end"
ok = false
return
}
ok = true
return
}
func parseInt(v string) (t, rest string, ok bool) {
if v == "" {
return
}
if v[0] < '0' || '9' < v[0] {
return
}
i := 1
for i < len(v) && '0' <= v[i] && v[i] <= '9' {
i++
}
if v[0] == '0' && i != 1 {
return
}
return v[:i], v[i:], true
}
func parsePrerelease(v string) (t, rest string, ok bool) {
// "A pre-release version MAY be denoted by appending a hyphen and
// a series of dot separated identifiers immediately following the patch version.
// Identifiers MUST comprise only ASCII alphanumerics and hyphen [0-9A-Za-z-].
// Identifiers MUST NOT be empty. Numeric identifiers MUST NOT include leading zeroes."
if v == "" || v[0] != '-' {
return
}
i := 1
start := 1
for i < len(v) && v[i] != '+' {
if !isIdentChar(v[i]) && v[i] != '.' {
return
}
if v[i] == '.' {
if start == i || isBadNum(v[start:i]) {
return
}
start = i + 1
}
i++
}
if start == i || isBadNum(v[start:i]) {
return
}
return v[:i], v[i:], true
}
func parseBuild(v string) (t, rest string, ok bool) {
if v == "" || v[0] != '+' {
return
}
i := 1
start := 1
for i < len(v) {
if !isIdentChar(v[i]) {
return
}
if v[i] == '.' {
if start == i {
return
}
start = i + 1
}
i++
}
if start == i {
return
}
return v[:i], v[i:], true
}
func isIdentChar(c byte) bool {
return 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || '0' <= c && c <= '9' || c == '-'
}
func isBadNum(v string) bool {
i := 0
for i < len(v) && '0' <= v[i] && v[i] <= '9' {
i++
}
return i == len(v) && i > 1 && v[0] == '0'
}
func isNum(v string) bool {
i := 0
for i < len(v) && '0' <= v[i] && v[i] <= '9' {
i++
}
return i == len(v)
}
func compareInt(x, y string) int {
if x == y {
return 0
}
if len(x) < len(y) {
return -1
}
if len(x) > len(y) {
return +1
}
if x < y {
return -1
} else {
return +1
}
}
func comparePrerelease(x, y string) int {
// "When major, minor, and patch are equal, a pre-release version has
// lower precedence than a normal version.
// Example: 1.0.0-alpha < 1.0.0.
// Precedence for two pre-release versions with the same major, minor,
// and patch version MUST be determined by comparing each dot separated
// identifier from left to right until a difference is found as follows:
// identifiers consisting of only digits are compared numerically and
// identifiers with letters or hyphens are compared lexically in ASCII
// sort order. Numeric identifiers always have lower precedence than
// non-numeric identifiers. A larger set of pre-release fields has a
// higher precedence than a smaller set, if all of the preceding
// identifiers are equal.
// Example: 1.0.0-alpha < 1.0.0-alpha.1 < 1.0.0-alpha.beta <
// 1.0.0-beta < 1.0.0-beta.2 < 1.0.0-beta.11 < 1.0.0-rc.1 < 1.0.0."
if x == y {
return 0
}
if x == "" {
return +1
}
if y == "" {
return -1
}
for x != "" && y != "" {
x = x[1:] // skip - or .
y = y[1:] // skip - or .
var dx, dy string
dx, x = nextIdent(x)
dy, y = nextIdent(y)
if dx != dy {
ix := isNum(dx)
iy := isNum(dy)
if ix != iy {
if ix {
return -1
} else {
return +1
}
}
if ix {
if len(dx) < len(dy) {
return -1
}
if len(dx) > len(dy) {
return +1
}
}
if dx < dy {
return -1
} else {
return +1
}
}
}
if x == "" {
return -1
} else {
return +1
}
}
func nextIdent(x string) (dx, rest string) {
i := 0
for i < len(x) && x[i] != '.' {
i++
}
return x[:i], x[i:]
}
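// exampleCompare is a minimal sketch of the ordering rules documented above.
func exampleCompare() {
	_ = IsValid("v1.2.3")                 // true
	_ = Compare("v1.0.0-alpha", "v1.0.0") // -1: a prerelease precedes the release
	_ = Max("v1.2", "v1.2.1")             // "v1.2.1": inputs are canonicalized first
}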

View file

@ -1,20 +0,0 @@
Copyright (c) 2016 Dominik Honnef
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

View file

@ -1,39 +0,0 @@
package arg
var args = map[string]int{
"(*sync.Pool).Put.x": 0,
"(*text/template.Template).Parse.text": 0,
"(io.Seeker).Seek.offset": 0,
"(time.Time).Sub.u": 0,
"append.elems": 1,
"append.slice": 0,
"bytes.Equal.a": 0,
"bytes.Equal.b": 1,
"encoding/binary.Write.data": 2,
"errors.New.text": 0,
"fmt.Printf.format": 0,
"fmt.Sprintf.a[0]": 1,
"fmt.Sprintf.format": 0,
"len.v": 0,
"make.size[0]": 1,
"make.size[1]": 2,
"make.t": 0,
"net/url.Parse.rawurl": 0,
"os.OpenFile.flag": 1,
"os/exec.Command.name": 0,
"os/signal.Notify.c": 0,
"regexp.Compile.expr": 0,
"runtime.SetFinalizer.finalizer": 1,
"runtime.SetFinalizer.obj": 0,
"sort.Sort.data": 0,
"time.Parse.layout": 0,
"time.Sleep.d": 0,
}
func Arg(name string) int {
n, ok := args[name]
if !ok {
panic("unknown argument " + name)
}
return n
}
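// exampleArg is a minimal sketch: look up which argument of fmt.Printf
// holds the format string.
func exampleArg() int {
	return Arg("fmt.Printf.format") // 0, per the table above
}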

View file

@ -1,129 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
/*
Package callgraph defines the call graph and various algorithms
and utilities to operate on it.
A call graph is a labelled directed graph whose nodes represent
functions and whose edge labels represent syntactic function call
sites. The presence of a labelled edge (caller, site, callee)
indicates that caller may call callee at the specified call site.
A call graph is a multigraph: it may contain multiple edges (caller,
*, callee) connecting the same pair of nodes, so long as the edges
differ by label; this occurs when one function calls another function
from multiple call sites. Also, it may contain multiple edges
(caller, site, *) that differ only by callee; this indicates a
polymorphic call.
A SOUND call graph is one that overapproximates the dynamic calling
behaviors of the program in all possible executions. One call graph
is more PRECISE than another if it is a smaller overapproximation of
the dynamic behavior.
All call graphs have a synthetic root node which is responsible for
calling main() and init().
Calls to built-in functions (e.g. panic, println) are not represented
in the call graph; they are treated like built-in operators of the
language.
*/
package callgraph // import "honnef.co/go/tools/callgraph"
// TODO(adonovan): add a function to eliminate wrappers from the
// callgraph, preserving topology.
// More generally, we could eliminate "uninteresting" nodes such as
// nodes from packages we don't care about.
import (
"fmt"
"go/token"
"honnef.co/go/tools/ssa"
)
// A Graph represents a call graph.
//
// A graph may contain nodes that are not reachable from the root.
// If the call graph is sound, such nodes indicate unreachable
// functions.
//
type Graph struct {
Root *Node // the distinguished root node
Nodes map[*ssa.Function]*Node // all nodes by function
}
// New returns a new Graph with the specified root node.
func New(root *ssa.Function) *Graph {
g := &Graph{Nodes: make(map[*ssa.Function]*Node)}
g.Root = g.CreateNode(root)
return g
}
// CreateNode returns the Node for fn, creating it if not present.
func (g *Graph) CreateNode(fn *ssa.Function) *Node {
n, ok := g.Nodes[fn]
if !ok {
n = &Node{Func: fn, ID: len(g.Nodes)}
g.Nodes[fn] = n
}
return n
}
// A Node represents a node in a call graph.
type Node struct {
Func *ssa.Function // the function this node represents
ID int // 0-based sequence number
In []*Edge // unordered set of incoming call edges (n.In[*].Callee == n)
Out []*Edge // unordered set of outgoing call edges (n.Out[*].Caller == n)
}
func (n *Node) String() string {
return fmt.Sprintf("n%d:%s", n.ID, n.Func)
}
// An Edge represents an edge in the call graph.
//
// Site is nil for edges originating in synthetic or intrinsic
// functions, e.g. reflect.Call or the root of the call graph.
type Edge struct {
Caller *Node
Site ssa.CallInstruction
Callee *Node
}
func (e Edge) String() string {
return fmt.Sprintf("%s --> %s", e.Caller, e.Callee)
}
func (e Edge) Description() string {
var prefix string
switch e.Site.(type) {
case nil:
return "synthetic call"
case *ssa.Go:
prefix = "concurrent "
case *ssa.Defer:
prefix = "deferred "
}
return prefix + e.Site.Common().Description()
}
func (e Edge) Pos() token.Pos {
if e.Site == nil {
return token.NoPos
}
return e.Site.Pos()
}
// AddEdge adds the edge (caller, site, callee) to the call graph.
// Elimination of duplicate edges is the caller's responsibility.
func AddEdge(caller *Node, site ssa.CallInstruction, callee *Node) {
e := &Edge{caller, site, callee}
callee.In = append(callee.In, e)
caller.Out = append(caller.Out, e)
}
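// exampleGraph is a minimal sketch: build a two-node graph by hand. root,
// callee, and site are hypothetical values supplied by the caller.
func exampleGraph(root, callee *ssa.Function, site ssa.CallInstruction) *Graph {
	g := New(root)
	AddEdge(g.Root, site, g.CreateNode(callee))
	return g
}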

View file

@ -1,35 +0,0 @@
// Package static computes the call graph of a Go program containing
// only static call edges.
package static // import "honnef.co/go/tools/callgraph/static"
import (
"honnef.co/go/tools/callgraph"
"honnef.co/go/tools/ssa"
"honnef.co/go/tools/ssa/ssautil"
)
// CallGraph computes the call graph of the specified program
// considering only static calls.
//
func CallGraph(prog *ssa.Program) *callgraph.Graph {
cg := callgraph.New(nil) // TODO(adonovan) eliminate concept of rooted callgraph
// TODO(adonovan): opt: use only a single pass over the ssa.Program.
// TODO(adonovan): opt: this is slower than RTA (perhaps because
// the lower precision means so many edges are allocated)!
for f := range ssautil.AllFunctions(prog) {
fnode := cg.CreateNode(f)
for _, b := range f.Blocks {
for _, instr := range b.Instrs {
if site, ok := instr.(ssa.CallInstruction); ok {
if g := site.Common().StaticCallee(); g != nil {
gnode := cg.CreateNode(g)
callgraph.AddEdge(fnode, site, gnode)
}
}
}
}
}
return cg
}
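// Illustrative use, assuming the SSA program is constructed the same
// way the lint package in this repository constructs it
// (ssautil.Packages followed by Build):
//
//	ssaprog, _ := ssautil.Packages(pkgs, ssa.GlobalDebug)
//	ssaprog.Build()
//	cg := CallGraph(ssaprog)
//	for fn, node := range cg.Nodes {
//		fmt.Println(fn, "has", len(node.Out), "static call edges")
//	}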


@ -1,181 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package callgraph
import "honnef.co/go/tools/ssa"
// This file provides various utilities over call graphs, such as
// visitation and path search.
// CalleesOf returns a new set containing all direct callees of the
// caller node.
//
func CalleesOf(caller *Node) map[*Node]bool {
callees := make(map[*Node]bool)
for _, e := range caller.Out {
callees[e.Callee] = true
}
return callees
}
// GraphVisitEdges visits all the edges in graph g in depth-first order.
// The edge function is called for each edge in postorder. If it
// returns non-nil, visitation stops and GraphVisitEdges returns that
// value.
//
func GraphVisitEdges(g *Graph, edge func(*Edge) error) error {
seen := make(map[*Node]bool)
var visit func(n *Node) error
visit = func(n *Node) error {
if !seen[n] {
seen[n] = true
for _, e := range n.Out {
if err := visit(e.Callee); err != nil {
return err
}
if err := edge(e); err != nil {
return err
}
}
}
return nil
}
for _, n := range g.Nodes {
if err := visit(n); err != nil {
return err
}
}
return nil
}
// PathSearch finds an arbitrary path starting at node start and
// ending at some node for which isEnd() returns true. On success,
// PathSearch returns the path as an ordered list of edges; on
// failure, it returns nil.
//
func PathSearch(start *Node, isEnd func(*Node) bool) []*Edge {
stack := make([]*Edge, 0, 32)
seen := make(map[*Node]bool)
var search func(n *Node) []*Edge
search = func(n *Node) []*Edge {
if !seen[n] {
seen[n] = true
if isEnd(n) {
return stack
}
for _, e := range n.Out {
stack = append(stack, e) // push
if found := search(e.Callee); found != nil {
return found
}
stack = stack[:len(stack)-1] // pop
}
}
return nil
}
return search(start)
}
// DeleteSyntheticNodes removes from call graph g all nodes for
// synthetic functions (except g.Root and package initializers),
// preserving the topology. In effect, calls to synthetic wrappers
// are "inlined".
//
func (g *Graph) DeleteSyntheticNodes() {
// Measurements on the standard library and go.tools show that the
// resulting graph has ~15% fewer nodes and 4-8% fewer edges
// than the input.
//
// Inlining a wrapper of in-degree m, out-degree n adds m*n
// and removes m+n edges. Since most wrappers are monomorphic
// (n=1) this results in a slight reduction. Polymorphic
// wrappers (n>1), e.g. from embedding an interface value
// inside a struct to satisfy some interface, cause an
// increase in the graph, but they seem to be uncommon.
// Hash all existing edges to avoid creating duplicates.
edges := make(map[Edge]bool)
for _, cgn := range g.Nodes {
for _, e := range cgn.Out {
edges[*e] = true
}
}
for fn, cgn := range g.Nodes {
if cgn == g.Root || fn.Synthetic == "" || isInit(cgn.Func) {
continue // keep
}
for _, eIn := range cgn.In {
for _, eOut := range cgn.Out {
newEdge := Edge{eIn.Caller, eIn.Site, eOut.Callee}
if edges[newEdge] {
continue // don't add duplicate
}
AddEdge(eIn.Caller, eIn.Site, eOut.Callee)
edges[newEdge] = true
}
}
g.DeleteNode(cgn)
}
}
func isInit(fn *ssa.Function) bool {
return fn.Pkg != nil && fn.Pkg.Func("init") == fn
}
// DeleteNode removes node n and its edges from the graph g.
// (NB: not efficient for batch deletion.)
func (g *Graph) DeleteNode(n *Node) {
n.deleteIns()
n.deleteOuts()
delete(g.Nodes, n.Func)
}
// deleteIns deletes all incoming edges to n.
func (n *Node) deleteIns() {
for _, e := range n.In {
removeOutEdge(e)
}
n.In = nil
}
// deleteOuts deletes all outgoing edges from n.
func (n *Node) deleteOuts() {
for _, e := range n.Out {
removeInEdge(e)
}
n.Out = nil
}
// removeOutEdge removes edge.Caller's outgoing edge 'edge'.
func removeOutEdge(edge *Edge) {
caller := edge.Caller
n := len(caller.Out)
for i, e := range caller.Out {
if e == edge {
// Replace it with the final element and shrink the slice.
caller.Out[i] = caller.Out[n-1]
caller.Out[n-1] = nil // aid GC
caller.Out = caller.Out[:n-1]
return
}
}
panic("edge not found: " + edge.String())
}
// removeInEdge removes edge.Callee's incoming edge 'edge'.
func removeInEdge(edge *Edge) {
callee := edge.Callee
n := len(callee.In)
for i, e := range callee.In {
if e == edge {
// Replace it with the final element and shrink the slice.
callee.In[i] = callee.In[n-1]
callee.In[n-1] = nil // aid GC
callee.In = callee.In[:n-1]
return
}
}
panic("edge not found: " + edge.String())
}
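// Illustrative use of the utilities above, assuming g is a call graph
// and target is some *ssa.Function of interest:
//
//	path := PathSearch(g.Root, func(n *Node) bool { return n.Func == target })
//	for _, e := range path {
//		fmt.Println(e) // one line per hop, e.g. "n0:root --> n5:target"
//	}
//	fmt.Println(len(CalleesOf(g.Root)), "distinct direct callees of the root")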


@ -1,15 +0,0 @@
# staticcheck
_staticcheck_ offers extensive analysis of Go code, covering a myriad
of categories. It will detect bugs, suggest code simplifications,
point out dead code, and more.
## Installation
go get honnef.co/go/tools/cmd/staticcheck
## Documentation
Detailed documentation can be found on
[staticcheck.io](https://staticcheck.io/docs/).


@ -1,30 +0,0 @@
// staticcheck analyses Go code and makes it better.
package main // import "honnef.co/go/tools/cmd/staticcheck"
import (
"os"
"honnef.co/go/tools/lint"
"honnef.co/go/tools/lint/lintutil"
"honnef.co/go/tools/simple"
"honnef.co/go/tools/staticcheck"
"honnef.co/go/tools/stylecheck"
"honnef.co/go/tools/unused"
)
func main() {
fs := lintutil.FlagSet("staticcheck")
fs.Parse(os.Args[1:])
checkers := []lint.Checker{
simple.NewChecker(),
staticcheck.NewChecker(),
stylecheck.NewChecker(),
}
uc := unused.NewChecker(unused.CheckAll)
uc.ConsiderReflection = true
checkers = append(checkers, unused.NewLintChecker(uc))
lintutil.ProcessFlagSet(checkers, fs)
}


@ -1,162 +0,0 @@
package config
import (
"os"
"path/filepath"
"github.com/BurntSushi/toml"
)
func mergeLists(a, b []string) []string {
out := make([]string, 0, len(a)+len(b))
for _, el := range b {
if el == "inherit" {
out = append(out, a...)
} else {
out = append(out, el)
}
}
return out
}
func normalizeList(list []string) []string {
if len(list) > 1 {
nlist := make([]string, 0, len(list))
nlist = append(nlist, list[0])
for i, el := range list[1:] {
if el != list[i] {
nlist = append(nlist, el)
}
}
list = nlist
}
for _, el := range list {
if el == "inherit" {
// This should never happen, because the default config
// should not use "inherit"
panic(`unresolved "inherit"`)
}
}
return list
}
func (cfg Config) Merge(ocfg Config) Config {
if ocfg.Checks != nil {
cfg.Checks = mergeLists(cfg.Checks, ocfg.Checks)
}
if ocfg.Initialisms != nil {
cfg.Initialisms = mergeLists(cfg.Initialisms, ocfg.Initialisms)
}
if ocfg.DotImportWhitelist != nil {
cfg.DotImportWhitelist = mergeLists(cfg.DotImportWhitelist, ocfg.DotImportWhitelist)
}
if ocfg.HTTPStatusCodeWhitelist != nil {
cfg.HTTPStatusCodeWhitelist = mergeLists(cfg.HTTPStatusCodeWhitelist, ocfg.HTTPStatusCodeWhitelist)
}
return cfg
}
type Config struct {
// TODO(dh): this implementation makes it impossible for external
// clients to add their own checkers with configuration. At the
// moment, we don't really care about that; we don't encourage
// that people use this package. In the future, we may. The
// obvious solution would be using map[string]interface{}, but
// that's obviously subpar.
Checks []string `toml:"checks"`
Initialisms []string `toml:"initialisms"`
DotImportWhitelist []string `toml:"dot_import_whitelist"`
HTTPStatusCodeWhitelist []string `toml:"http_status_code_whitelist"`
}
var defaultConfig = Config{
Checks: []string{"all", "-ST1003"},
Initialisms: []string{
"ACL", "API", "ASCII", "CPU", "CSS", "DNS",
"EOF", "GUID", "HTML", "HTTP", "HTTPS", "ID",
"IP", "JSON", "QPS", "RAM", "RPC", "SLA",
"SMTP", "SQL", "SSH", "TCP", "TLS", "TTL",
"UDP", "UI", "GID", "UID", "UUID", "URI",
"URL", "UTF8", "VM", "XML", "XMPP", "XSRF",
"XSS",
},
DotImportWhitelist: []string{},
HTTPStatusCodeWhitelist: []string{"200", "400", "404", "500"},
}
const configName = "staticcheck.conf"
func parseConfigs(dir string) ([]Config, error) {
var out []Config
// TODO(dh): consider stopping at the GOPATH/module boundary
for dir != "" {
f, err := os.Open(filepath.Join(dir, configName))
if os.IsNotExist(err) {
ndir := filepath.Dir(dir)
if ndir == dir {
break
}
dir = ndir
continue
}
if err != nil {
return nil, err
}
var cfg Config
_, err = toml.DecodeReader(f, &cfg)
f.Close()
if err != nil {
return nil, err
}
out = append(out, cfg)
ndir := filepath.Dir(dir)
if ndir == dir {
break
}
dir = ndir
}
out = append(out, defaultConfig)
if len(out) < 2 {
return out, nil
}
// Reverse the slice so that the default config comes first and more
// deeply nested (more specific) configs are merged on top of it.
for i := 0; i < len(out)/2; i++ {
out[i], out[len(out)-1-i] = out[len(out)-1-i], out[i]
}
return out, nil
}
func mergeConfigs(confs []Config) Config {
if len(confs) == 0 {
// This shouldn't happen because we always have at least a
// default config.
panic("trying to merge zero configs")
}
if len(confs) == 1 {
return confs[0]
}
conf := confs[0]
for _, oconf := range confs[1:] {
conf = conf.Merge(oconf)
}
return conf
}
func Load(dir string) (Config, error) {
confs, err := parseConfigs(dir)
if err != nil {
return Config{}, err
}
conf := mergeConfigs(confs)
conf.Checks = normalizeList(conf.Checks)
conf.Initialisms = normalizeList(conf.Initialisms)
conf.DotImportWhitelist = normalizeList(conf.DotImportWhitelist)
conf.HTTPStatusCodeWhitelist = normalizeList(conf.HTTPStatusCodeWhitelist)
return conf, nil
}
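// Illustrative example of the merge semantics above: a project-local
// configuration whose Checks list is []string{"inherit", "-SA1019"},
// merged on top of the default Checks list, expands "inherit" to the
// parent list:
//
//	merged := mergeLists([]string{"all", "-ST1003"}, []string{"inherit", "-SA1019"})
//	// merged == []string{"all", "-ST1003", "-SA1019"}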


@ -1,10 +0,0 @@
checks = ["all", "-ST1003", "-ST1014"]
initialisms = ["ACL", "API", "ASCII", "CPU", "CSS", "DNS",
"EOF", "GUID", "HTML", "HTTP", "HTTPS", "ID",
"IP", "JSON", "QPS", "RAM", "RPC", "SLA",
"SMTP", "SQL", "SSH", "TCP", "TLS", "TTL",
"UDP", "UI", "GID", "UID", "UUID", "URI",
"URL", "UTF8", "VM", "XML", "XMPP", "XSRF",
"XSS"]
dot_import_whitelist = []
http_status_code_whitelist = ["200", "400", "404", "500"]


@ -1,54 +0,0 @@
package deprecated
type Deprecation struct {
DeprecatedSince int
AlternativeAvailableSince int
}
var Stdlib = map[string]Deprecation{
"image/jpeg.Reader": {4, 0},
// FIXME(dh): AllowBinary isn't being detected as deprecated
// because the comment has a newline right after "Deprecated:"
"go/build.AllowBinary": {7, 7},
"(archive/zip.FileHeader).CompressedSize": {1, 1},
"(archive/zip.FileHeader).UncompressedSize": {1, 1},
"(go/doc.Package).Bugs": {1, 1},
"os.SEEK_SET": {7, 7},
"os.SEEK_CUR": {7, 7},
"os.SEEK_END": {7, 7},
"(net.Dialer).Cancel": {7, 7},
"runtime.CPUProfile": {9, 0},
"compress/flate.ReadError": {6, 6},
"compress/flate.WriteError": {6, 6},
"path/filepath.HasPrefix": {0, 0},
"(net/http.Transport).Dial": {7, 7},
"(*net/http.Transport).CancelRequest": {6, 5},
"net/http.ErrWriteAfterFlush": {7, 0},
"net/http.ErrHeaderTooLong": {8, 0},
"net/http.ErrShortBody": {8, 0},
"net/http.ErrMissingContentLength": {8, 0},
"net/http/httputil.ErrPersistEOF": {0, 0},
"net/http/httputil.ErrClosed": {0, 0},
"net/http/httputil.ErrPipeline": {0, 0},
"net/http/httputil.ServerConn": {0, 0},
"net/http/httputil.NewServerConn": {0, 0},
"net/http/httputil.ClientConn": {0, 0},
"net/http/httputil.NewClientConn": {0, 0},
"net/http/httputil.NewProxyClientConn": {0, 0},
"(net/http.Request).Cancel": {7, 7},
"(text/template/parse.PipeNode).Line": {1, 1},
"(text/template/parse.ActionNode).Line": {1, 1},
"(text/template/parse.BranchNode).Line": {1, 1},
"(text/template/parse.TemplateNode).Line": {1, 1},
"database/sql/driver.ColumnConverter": {9, 9},
"database/sql/driver.Execer": {8, 8},
"database/sql/driver.Queryer": {8, 8},
"(database/sql/driver.Conn).Begin": {8, 8},
"(database/sql/driver.Stmt).Exec": {8, 8},
"(database/sql/driver.Stmt).Query": {8, 8},
"syscall.StringByteSlice": {1, 1},
"syscall.StringBytePtr": {1, 1},
"syscall.StringSlicePtr": {1, 1},
"syscall.StringToUTF16": {1, 1},
"syscall.StringToUTF16Ptr": {1, 1},
}


@ -1,56 +0,0 @@
package functions
import (
"go/token"
"go/types"
"honnef.co/go/tools/ssa"
)
func concreteReturnTypes(fn *ssa.Function) []*types.Tuple {
res := fn.Signature.Results()
if res == nil {
return nil
}
ifaces := make([]bool, res.Len())
any := false
for i := 0; i < res.Len(); i++ {
_, ifaces[i] = res.At(i).Type().Underlying().(*types.Interface)
any = any || ifaces[i]
}
if !any {
return []*types.Tuple{res}
}
var out []*types.Tuple
for _, block := range fn.Blocks {
if len(block.Instrs) == 0 {
continue
}
ret, ok := block.Instrs[len(block.Instrs)-1].(*ssa.Return)
if !ok {
continue
}
vars := make([]*types.Var, res.Len())
for i, v := range ret.Results {
var typ types.Type
if !ifaces[i] {
typ = res.At(i).Type()
} else if mi, ok := v.(*ssa.MakeInterface); ok {
// TODO(dh): if mi.X is a function call that returns
// an interface, call concreteReturnTypes on that
// function (or, really, go through Descriptions,
// avoid infinite recursion etc, just like nil error
// detection)
// TODO(dh): support Phi nodes
typ = mi.X.Type()
} else {
typ = res.At(i).Type()
}
vars[i] = types.NewParam(token.NoPos, nil, "", typ)
}
out = append(out, types.NewTuple(vars...))
}
// TODO(dh): deduplicate out
return out
}


@ -1,150 +0,0 @@
package functions
import (
"go/types"
"sync"
"honnef.co/go/tools/callgraph"
"honnef.co/go/tools/callgraph/static"
"honnef.co/go/tools/ssa"
"honnef.co/go/tools/staticcheck/vrp"
)
var stdlibDescs = map[string]Description{
"errors.New": {Pure: true},
"fmt.Errorf": {Pure: true},
"fmt.Sprintf": {Pure: true},
"fmt.Sprint": {Pure: true},
"sort.Reverse": {Pure: true},
"strings.Map": {Pure: true},
"strings.Repeat": {Pure: true},
"strings.Replace": {Pure: true},
"strings.Title": {Pure: true},
"strings.ToLower": {Pure: true},
"strings.ToLowerSpecial": {Pure: true},
"strings.ToTitle": {Pure: true},
"strings.ToTitleSpecial": {Pure: true},
"strings.ToUpper": {Pure: true},
"strings.ToUpperSpecial": {Pure: true},
"strings.Trim": {Pure: true},
"strings.TrimFunc": {Pure: true},
"strings.TrimLeft": {Pure: true},
"strings.TrimLeftFunc": {Pure: true},
"strings.TrimPrefix": {Pure: true},
"strings.TrimRight": {Pure: true},
"strings.TrimRightFunc": {Pure: true},
"strings.TrimSpace": {Pure: true},
"strings.TrimSuffix": {Pure: true},
"(*net/http.Request).WithContext": {Pure: true},
"math/rand.Read": {NilError: true},
"(*math/rand.Rand).Read": {NilError: true},
}
type Description struct {
// The function is known to be pure
Pure bool
// The function is known to be a stub
Stub bool
// The function is known to never return (panics notwithstanding)
Infinite bool
// Variable ranges
Ranges vrp.Ranges
Loops []Loop
// Function returns an error as its last argument, but it is
// always nil
NilError bool
ConcreteReturnTypes []*types.Tuple
}
type descriptionEntry struct {
ready chan struct{}
result Description
}
type Descriptions struct {
CallGraph *callgraph.Graph
mu sync.Mutex
cache map[*ssa.Function]*descriptionEntry
}
func NewDescriptions(prog *ssa.Program) *Descriptions {
return &Descriptions{
CallGraph: static.CallGraph(prog),
cache: map[*ssa.Function]*descriptionEntry{},
}
}
func (d *Descriptions) Get(fn *ssa.Function) Description {
d.mu.Lock()
fd := d.cache[fn]
if fd == nil {
fd = &descriptionEntry{
ready: make(chan struct{}),
}
d.cache[fn] = fd
d.mu.Unlock()
{
fd.result = stdlibDescs[fn.RelString(nil)]
fd.result.Pure = fd.result.Pure || d.IsPure(fn)
fd.result.Stub = fd.result.Stub || d.IsStub(fn)
fd.result.Infinite = fd.result.Infinite || !terminates(fn)
fd.result.Ranges = vrp.BuildGraph(fn).Solve()
fd.result.Loops = findLoops(fn)
fd.result.NilError = fd.result.NilError || IsNilError(fn)
fd.result.ConcreteReturnTypes = concreteReturnTypes(fn)
}
close(fd.ready)
} else {
d.mu.Unlock()
<-fd.ready
}
return fd.result
}
func IsNilError(fn *ssa.Function) bool {
// TODO(dh): This is very simplistic, as we only look for constant
// nil returns. A more advanced approach would work transitively.
// An even more advanced approach would be context-aware and
// determine nil errors based on inputs (e.g. io.WriteString to a
// bytes.Buffer will always return nil, but an io.WriteString to
// an os.File might not). Similarly, an os.File opened for reading
// won't error on Close, but other files will.
res := fn.Signature.Results()
if res.Len() == 0 {
return false
}
last := res.At(res.Len() - 1)
if types.TypeString(last.Type(), nil) != "error" {
return false
}
if fn.Blocks == nil {
return false
}
for _, block := range fn.Blocks {
if len(block.Instrs) == 0 {
continue
}
ins := block.Instrs[len(block.Instrs)-1]
ret, ok := ins.(*ssa.Return)
if !ok {
continue
}
v := ret.Results[len(ret.Results)-1]
c, ok := v.(*ssa.Const)
if !ok {
return false
}
if !c.IsNil() {
return false
}
}
return true
}
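// For instance, IsNilError would report true for a function like the
// following (illustrative), because the only return instruction yields
// a constant nil for the trailing error value:
//
//	func closeQuietly(c io.Closer) error {
//		c.Close() // error deliberately dropped
//		return nil
//	}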


@ -1,50 +0,0 @@
package functions
import "honnef.co/go/tools/ssa"
type Loop map[*ssa.BasicBlock]bool
func findLoops(fn *ssa.Function) []Loop {
if fn.Blocks == nil {
return nil
}
tree := fn.DomPreorder()
var sets []Loop
for _, h := range tree {
for _, n := range h.Preds {
if !h.Dominates(n) {
continue
}
// n is a back-edge to h
// h is the loop header
if n == h {
sets = append(sets, Loop{n: true})
continue
}
set := Loop{h: true, n: true}
for _, b := range allPredsBut(n, h, nil) {
set[b] = true
}
sets = append(sets, set)
}
}
return sets
}
func allPredsBut(b, but *ssa.BasicBlock, list []*ssa.BasicBlock) []*ssa.BasicBlock {
outer:
for _, pred := range b.Preds {
if pred == but {
continue
}
for _, p := range list {
// TODO improve big-o complexity of this function
if pred == p {
continue outer
}
}
list = append(list, pred)
list = allPredsBut(pred, but, list)
}
return list
}


@ -1,123 +0,0 @@
package functions
import (
"go/token"
"go/types"
"honnef.co/go/tools/callgraph"
"honnef.co/go/tools/lint/lintdsl"
"honnef.co/go/tools/ssa"
)
// IsStub reports whether a function is a stub. A function is
// considered a stub if it has no instructions, or exactly one
// instruction, which must either return only constant values or
// panic.
func (d *Descriptions) IsStub(fn *ssa.Function) bool {
if len(fn.Blocks) == 0 {
return true
}
if len(fn.Blocks) > 1 {
return false
}
instrs := lintdsl.FilterDebug(fn.Blocks[0].Instrs)
if len(instrs) != 1 {
return false
}
switch instrs[0].(type) {
case *ssa.Return:
// Since this is the only instruction, the return value must
// be a constant. We consider all constants as stubs, not just
// the zero value. This does not, unfortunately, cover zero
// initialised structs, as these cause additional
// instructions.
return true
case *ssa.Panic:
return true
default:
return false
}
}
func (d *Descriptions) IsPure(fn *ssa.Function) bool {
if fn.Signature.Results().Len() == 0 {
// A function with no return values is empty or is doing some
// work we cannot see (for example because of build tags);
// don't consider it pure.
return false
}
for _, param := range fn.Params {
if _, ok := param.Type().Underlying().(*types.Basic); !ok {
return false
}
}
if fn.Blocks == nil {
return false
}
checkCall := func(common *ssa.CallCommon) bool {
if common.IsInvoke() {
return false
}
builtin, ok := common.Value.(*ssa.Builtin)
if !ok {
if common.StaticCallee() != fn {
if common.StaticCallee() == nil {
return false
}
// TODO(dh): ideally, IsPure wouldn't be responsible
// for avoiding infinite recursion, but
// FunctionDescriptions would be.
node := d.CallGraph.CreateNode(common.StaticCallee())
if callgraph.PathSearch(node, func(other *callgraph.Node) bool {
return other.Func == fn
}) != nil {
return false
}
if !d.Get(common.StaticCallee()).Pure {
return false
}
}
} else {
switch builtin.Name() {
case "len", "cap", "make", "new":
default:
return false
}
}
return true
}
for _, b := range fn.Blocks {
for _, ins := range b.Instrs {
switch ins := ins.(type) {
case *ssa.Call:
if !checkCall(ins.Common()) {
return false
}
case *ssa.Defer:
if !checkCall(&ins.Call) {
return false
}
case *ssa.Select:
return false
case *ssa.Send:
return false
case *ssa.Go:
return false
case *ssa.Panic:
return false
case *ssa.Store:
return false
case *ssa.FieldAddr:
return false
case *ssa.UnOp:
if ins.Op == token.MUL || ins.Op == token.AND {
return false
}
}
}
}
return true
}
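// Illustrative example of the classification above: IsPure would
// accept a function such as
//
//	func square(x int) int { return x * x }
//
// (basic-typed parameters, no stores, sends, or calls to impure
// functions), while anything that writes through a pointer, starts a
// goroutine, or panics is rejected.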


@ -1,24 +0,0 @@
package functions
import "honnef.co/go/tools/ssa"
// terminates reports whether fn is supposed to return, that is, if it
// has at least one theoretical path that returns from the function.
// Explicit panics do not count as terminating.
func terminates(fn *ssa.Function) bool {
if fn.Blocks == nil {
// assuming that a function terminates is the conservative
// choice
return true
}
for _, block := range fn.Blocks {
if len(block.Instrs) == 0 {
continue
}
if _, ok := block.Instrs[len(block.Instrs)-1].(*ssa.Return); ok {
return true
}
}
return false
}


@ -1,68 +0,0 @@
package sharedcheck
import (
"go/ast"
"go/types"
"honnef.co/go/tools/lint"
. "honnef.co/go/tools/lint/lintdsl"
"honnef.co/go/tools/ssa"
)
func CheckRangeStringRunes(j *lint.Job) {
for _, ssafn := range j.Program.InitialFunctions {
fn := func(node ast.Node) bool {
rng, ok := node.(*ast.RangeStmt)
if !ok || !IsBlank(rng.Key) {
return true
}
v, _ := ssafn.ValueForExpr(rng.X)
// Check that we're converting from string to []rune
val, _ := v.(*ssa.Convert)
if val == nil {
return true
}
Tsrc, ok := val.X.Type().(*types.Basic)
if !ok || Tsrc.Kind() != types.String {
return true
}
Tdst, ok := val.Type().(*types.Slice)
if !ok {
return true
}
TdstElem, ok := Tdst.Elem().(*types.Basic)
if !ok || TdstElem.Kind() != types.Int32 {
return true
}
// Check that the result of the conversion is only used to
// range over
refs := val.Referrers()
if refs == nil {
return true
}
// Expect two refs: one for obtaining the length of the slice,
// one for accessing the elements
if len(FilterDebug(*refs)) != 2 {
// TODO(dh): right now, we check that only one place
// refers to our slice. This will miss cases such as
// ranging over the slice twice. Ideally, we'd ensure that
// the slice is only used for ranging over (without
// accessing the key), but that is harder to do because in
// SSA form, ranging over a slice looks like an ordinary
// loop with index increments and slice accesses. We'd
// have to look at the associated AST node to check that
// it's a range statement.
return true
}
j.Errorf(rng, "should range over string, not []rune(string)")
return true
}
Inspect(ssafn.Syntax(), fn)
}
}
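// Illustratively, the pattern flagged by this check and its suggested
// replacement; with the key blank, ranging over the string directly
// yields the same runes without allocating a []rune:
//
//	for _, r := range []rune(s) { ... } // flagged
//	for _, r := range s { ... }         // preferred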


@ -1,28 +0,0 @@
Copyright (c) 2013 The Go Authors. All rights reserved.
Copyright (c) 2016 Dominik Honnef. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


@ -1,33 +0,0 @@
package lint
import (
"bufio"
"bytes"
"io"
)
var (
prefix = []byte("// Code generated ")
suffix = []byte(" DO NOT EDIT.")
nl = []byte("\n")
crnl = []byte("\r\n")
)
func isGenerated(r io.Reader) bool {
br := bufio.NewReader(r)
for {
s, err := br.ReadBytes('\n')
if err != nil && err != io.EOF {
return false
}
s = bytes.TrimSuffix(s, crnl)
s = bytes.TrimSuffix(s, nl)
if bytes.HasPrefix(s, prefix) && bytes.HasSuffix(s, suffix) {
return true
}
if err == io.EOF {
break
}
}
return false
}
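// Illustrative example: a file carrying the conventional marker is
// recognized, and since the scan runs to EOF the marker need not be on
// the first line:
//
//	isGenerated(strings.NewReader(
//		"// Code generated by protoc-gen-go. DO NOT EDIT.\n\npackage pb\n")) // true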


@ -1,716 +0,0 @@
// Package lint provides the foundation for tools like staticcheck
package lint // import "honnef.co/go/tools/lint"
import (
"fmt"
"go/ast"
"go/token"
"go/types"
"io"
"os"
"path/filepath"
"sort"
"strings"
"sync"
"time"
"unicode"
"golang.org/x/tools/go/packages"
"honnef.co/go/tools/config"
"honnef.co/go/tools/ssa"
"honnef.co/go/tools/ssa/ssautil"
)
type Job struct {
Program *Program
checker string
check Check
problems []Problem
duration time.Duration
}
type Ignore interface {
Match(p Problem) bool
}
type LineIgnore struct {
File string
Line int
Checks []string
matched bool
pos token.Pos
}
func (li *LineIgnore) Match(p Problem) bool {
if p.Position.Filename != li.File || p.Position.Line != li.Line {
return false
}
for _, c := range li.Checks {
if m, _ := filepath.Match(c, p.Check); m {
li.matched = true
return true
}
}
return false
}
func (li *LineIgnore) String() string {
matched := "not matched"
if li.matched {
matched = "matched"
}
return fmt.Sprintf("%s:%d %s (%s)", li.File, li.Line, strings.Join(li.Checks, ", "), matched)
}
type FileIgnore struct {
File string
Checks []string
}
func (fi *FileIgnore) Match(p Problem) bool {
if p.Position.Filename != fi.File {
return false
}
for _, c := range fi.Checks {
if m, _ := filepath.Match(c, p.Check); m {
return true
}
}
return false
}
type GlobIgnore struct {
Pattern string
Checks []string
}
func (gi *GlobIgnore) Match(p Problem) bool {
if gi.Pattern != "*" {
pkgpath := p.Package.Types.Path()
if strings.HasSuffix(pkgpath, "_test") {
pkgpath = pkgpath[:len(pkgpath)-len("_test")]
}
name := filepath.Join(pkgpath, filepath.Base(p.Position.Filename))
if m, _ := filepath.Match(gi.Pattern, name); !m {
return false
}
}
for _, c := range gi.Checks {
if m, _ := filepath.Match(c, p.Check); m {
return true
}
}
return false
}
type Program struct {
SSA *ssa.Program
InitialPackages []*Pkg
InitialFunctions []*ssa.Function
AllPackages []*packages.Package
AllFunctions []*ssa.Function
Files []*ast.File
GoVersion int
tokenFileMap map[*token.File]*ast.File
astFileMap map[*ast.File]*Pkg
packagesMap map[string]*packages.Package
genMu sync.RWMutex
generatedMap map[string]bool
}
func (prog *Program) Fset() *token.FileSet {
return prog.InitialPackages[0].Fset
}
type Func func(*Job)
type Severity uint8
const (
Error Severity = iota
Warning
Ignored
)
// Problem represents a problem in some source code.
type Problem struct {
Position token.Position // position in source file
Text string // the prose that describes the problem
Check string
Checker string
Package *Pkg
Severity Severity
}
func (p *Problem) String() string {
if p.Check == "" {
return p.Text
}
return fmt.Sprintf("%s (%s)", p.Text, p.Check)
}
type Checker interface {
Name() string
Prefix() string
Init(*Program)
Checks() []Check
}
type Check struct {
Fn Func
ID string
FilterGenerated bool
}
// A Linter lints Go source code.
type Linter struct {
Checkers []Checker
Ignores []Ignore
GoVersion int
ReturnIgnored bool
Config config.Config
MaxConcurrentJobs int
PrintStats bool
automaticIgnores []Ignore
}
func (l *Linter) ignore(p Problem) bool {
ignored := false
for _, ig := range l.automaticIgnores {
// We cannot short-circuit these, as we want to record, for
// each ignore, whether it matched or not.
if ig.Match(p) {
ignored = true
}
}
if ignored {
// no need to execute other ignores if we've already had a
// match.
return true
}
for _, ig := range l.Ignores {
// We can short-circuit here, as we aren't tracking any
// information.
if ig.Match(p) {
return true
}
}
return false
}
func (prog *Program) File(node Positioner) *ast.File {
return prog.tokenFileMap[prog.SSA.Fset.File(node.Pos())]
}
func (j *Job) File(node Positioner) *ast.File {
return j.Program.File(node)
}
func parseDirective(s string) (cmd string, args []string) {
if !strings.HasPrefix(s, "//lint:") {
return "", nil
}
s = strings.TrimPrefix(s, "//lint:")
fields := strings.Split(s, " ")
return fields[0], fields[1:]
}
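// For example (illustrative): arguments are split on single spaces, so
// the check list arrives as one comma-separated token and the reason
// as the remaining words.
//
//	cmd, args := parseDirective("//lint:ignore SA1019,SA4006 kept for compatibility")
//	// cmd == "ignore"
//	// args == []string{"SA1019,SA4006", "kept", "for", "compatibility"}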
type PerfStats struct {
PackageLoading time.Duration
SSABuild time.Duration
OtherInitWork time.Duration
CheckerInits map[string]time.Duration
Jobs []JobStat
}
type JobStat struct {
Job string
Duration time.Duration
}
func (stats *PerfStats) Print(w io.Writer) {
fmt.Fprintln(w, "Package loading:", stats.PackageLoading)
fmt.Fprintln(w, "SSA build:", stats.SSABuild)
fmt.Fprintln(w, "Other init work:", stats.OtherInitWork)
fmt.Fprintln(w, "Checker inits:")
for checker, d := range stats.CheckerInits {
fmt.Fprintf(w, "\t%s: %s\n", checker, d)
}
fmt.Fprintln(w)
fmt.Fprintln(w, "Jobs:")
sort.Slice(stats.Jobs, func(i, j int) bool {
return stats.Jobs[i].Duration < stats.Jobs[j].Duration
})
var total time.Duration
for _, job := range stats.Jobs {
fmt.Fprintf(w, "\t%s: %s\n", job.Job, job.Duration)
total += job.Duration
}
fmt.Fprintf(w, "\tTotal: %s\n", total)
}
func (l *Linter) Lint(initial []*packages.Package, stats *PerfStats) []Problem {
allPkgs := allPackages(initial)
t := time.Now()
ssaprog, _ := ssautil.Packages(allPkgs, ssa.GlobalDebug)
ssaprog.Build()
if stats != nil {
stats.SSABuild = time.Since(t)
}
t = time.Now()
pkgMap := map[*ssa.Package]*Pkg{}
var pkgs []*Pkg
for _, pkg := range initial {
ssapkg := ssaprog.Package(pkg.Types)
var cfg config.Config
if len(pkg.GoFiles) != 0 {
path := pkg.GoFiles[0]
dir := filepath.Dir(path)
var err error
// OPT(dh): we're rebuilding the entire config tree for
// each package. For example, if we check a/b/c and
// a/b/c/d, we'll process a, a/b, a/b/c, a, a/b, a/b/c,
// a/b/c/d. We should cache configs per package and only
// load the new levels.
cfg, err = config.Load(dir)
if err != nil {
// FIXME(dh): we couldn't load the config, what are we
// supposed to do? probably tell the user somehow
}
cfg = cfg.Merge(l.Config)
}
pkg := &Pkg{
SSA: ssapkg,
Package: pkg,
Config: cfg,
}
pkgMap[ssapkg] = pkg
pkgs = append(pkgs, pkg)
}
prog := &Program{
SSA: ssaprog,
InitialPackages: pkgs,
AllPackages: allPkgs,
GoVersion: l.GoVersion,
tokenFileMap: map[*token.File]*ast.File{},
astFileMap: map[*ast.File]*Pkg{},
generatedMap: map[string]bool{},
}
prog.packagesMap = map[string]*packages.Package{}
for _, pkg := range allPkgs {
prog.packagesMap[pkg.Types.Path()] = pkg
}
isInitial := map[*types.Package]struct{}{}
for _, pkg := range pkgs {
isInitial[pkg.Types] = struct{}{}
}
for fn := range ssautil.AllFunctions(ssaprog) {
if fn.Pkg == nil {
continue
}
prog.AllFunctions = append(prog.AllFunctions, fn)
if _, ok := isInitial[fn.Pkg.Pkg]; ok {
prog.InitialFunctions = append(prog.InitialFunctions, fn)
}
}
for _, pkg := range pkgs {
prog.Files = append(prog.Files, pkg.Syntax...)
ssapkg := ssaprog.Package(pkg.Types)
for _, f := range pkg.Syntax {
prog.astFileMap[f] = pkgMap[ssapkg]
}
}
for _, pkg := range allPkgs {
for _, f := range pkg.Syntax {
tf := pkg.Fset.File(f.Pos())
prog.tokenFileMap[tf] = f
}
}
var out []Problem
l.automaticIgnores = nil
for _, pkg := range initial {
for _, f := range pkg.Syntax {
cm := ast.NewCommentMap(pkg.Fset, f, f.Comments)
for node, cgs := range cm {
for _, cg := range cgs {
for _, c := range cg.List {
if !strings.HasPrefix(c.Text, "//lint:") {
continue
}
cmd, args := parseDirective(c.Text)
switch cmd {
case "ignore", "file-ignore":
if len(args) < 2 {
// FIXME(dh): this causes duplicated warnings when using megacheck
p := Problem{
Position: prog.DisplayPosition(c.Pos()),
Text: "malformed linter directive; missing the required reason field?",
Check: "",
Checker: "lint",
Package: nil,
}
out = append(out, p)
continue
}
default:
// unknown directive, ignore
continue
}
checks := strings.Split(args[0], ",")
pos := prog.DisplayPosition(node.Pos())
var ig Ignore
switch cmd {
case "ignore":
ig = &LineIgnore{
File: pos.Filename,
Line: pos.Line,
Checks: checks,
pos: c.Pos(),
}
case "file-ignore":
ig = &FileIgnore{
File: pos.Filename,
Checks: checks,
}
}
l.automaticIgnores = append(l.automaticIgnores, ig)
}
}
}
}
}
sizes := struct {
types int
defs int
uses int
implicits int
selections int
scopes int
}{}
for _, pkg := range pkgs {
sizes.types += len(pkg.TypesInfo.Types)
sizes.defs += len(pkg.TypesInfo.Defs)
sizes.uses += len(pkg.TypesInfo.Uses)
sizes.implicits += len(pkg.TypesInfo.Implicits)
sizes.selections += len(pkg.TypesInfo.Selections)
sizes.scopes += len(pkg.TypesInfo.Scopes)
}
if stats != nil {
stats.OtherInitWork = time.Since(t)
}
for _, checker := range l.Checkers {
t := time.Now()
checker.Init(prog)
if stats != nil {
stats.CheckerInits[checker.Name()] = time.Since(t)
}
}
var jobs []*Job
var allChecks []string
for _, checker := range l.Checkers {
checks := checker.Checks()
for _, check := range checks {
allChecks = append(allChecks, check.ID)
j := &Job{
Program: prog,
checker: checker.Name(),
check: check,
}
jobs = append(jobs, j)
}
}
max := len(jobs)
if l.MaxConcurrentJobs > 0 {
max = l.MaxConcurrentJobs
}
sem := make(chan struct{}, max)
wg := &sync.WaitGroup{}
for _, j := range jobs {
wg.Add(1)
go func(j *Job) {
defer wg.Done()
sem <- struct{}{}
defer func() { <-sem }()
fn := j.check.Fn
if fn == nil {
return
}
t := time.Now()
fn(j)
j.duration = time.Since(t)
}(j)
}
wg.Wait()
for _, j := range jobs {
if stats != nil {
stats.Jobs = append(stats.Jobs, JobStat{j.check.ID, j.duration})
}
for _, p := range j.problems {
allowedChecks := FilterChecks(allChecks, p.Package.Config.Checks)
if l.ignore(p) {
p.Severity = Ignored
}
// TODO(dh): support globs in check white/blacklist
// OPT(dh): this approach doesn't actually disable checks,
// it just discards their results. For the moment, that's
// fine. None of our checks are super expensive. In the
// future, we may want to provide opt-in expensive
// analysis, which shouldn't run at all. It may be easiest
// to implement this in the individual checks.
if (l.ReturnIgnored || p.Severity != Ignored) && allowedChecks[p.Check] {
out = append(out, p)
}
}
}
for _, ig := range l.automaticIgnores {
ig, ok := ig.(*LineIgnore)
if !ok {
continue
}
if ig.matched {
continue
}
couldveMatched := false
for f, pkg := range prog.astFileMap {
if prog.Fset().Position(f.Pos()).Filename != ig.File {
continue
}
allowedChecks := FilterChecks(allChecks, pkg.Config.Checks)
for _, c := range ig.Checks {
if !allowedChecks[c] {
continue
}
couldveMatched = true
break
}
break
}
if !couldveMatched {
// The ignored checks were disabled for the containing package.
// Don't flag the ignore for not having matched.
continue
}
p := Problem{
Position: prog.DisplayPosition(ig.pos),
Text: "this linter directive didn't match anything; should it be removed?",
Check: "",
Checker: "lint",
Package: nil,
}
out = append(out, p)
}
sort.Slice(out, func(i int, j int) bool {
pi, pj := out[i].Position, out[j].Position
if pi.Filename != pj.Filename {
return pi.Filename < pj.Filename
}
if pi.Line != pj.Line {
return pi.Line < pj.Line
}
if pi.Column != pj.Column {
return pi.Column < pj.Column
}
return out[i].Text < out[j].Text
})
if len(out) < 2 {
return out
}
uniq := make([]Problem, 0, len(out))
uniq = append(uniq, out[0])
prev := out[0]
for _, p := range out[1:] {
if prev.Position == p.Position && prev.Text == p.Text {
continue
}
prev = p
uniq = append(uniq, p)
}
if l.PrintStats && stats != nil {
stats.Print(os.Stderr)
}
return uniq
}
func FilterChecks(allChecks []string, checks []string) map[string]bool {
// OPT(dh): this entire computation could be cached per package
allowedChecks := map[string]bool{}
for _, check := range checks {
b := true
if len(check) > 1 && check[0] == '-' {
b = false
check = check[1:]
}
if check == "*" || check == "all" {
// Match all
for _, c := range allChecks {
allowedChecks[c] = b
}
} else if strings.HasSuffix(check, "*") {
// Glob
prefix := check[:len(check)-1]
isCat := strings.IndexFunc(prefix, func(r rune) bool { return unicode.IsNumber(r) }) == -1
for _, c := range allChecks {
idx := strings.IndexFunc(c, func(r rune) bool { return unicode.IsNumber(r) })
if isCat {
// Glob is S*, which should match S1000 but not SA1000
cat := c[:idx]
if prefix == cat {
allowedChecks[c] = b
}
} else {
// Glob is S1*
if strings.HasPrefix(c, prefix) {
allowedChecks[c] = b
}
}
}
} else {
// Literal check name
allowedChecks[check] = b
}
}
return allowedChecks
}
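// A worked example (illustrative): later entries override earlier
// ones, and a category glob such as "SA*" matches SA1000 but not
// S1000.
//
//	m := FilterChecks(
//		[]string{"S1000", "SA1000", "SA1019", "ST1003"},
//		[]string{"all", "-SA*", "SA1019"},
//	)
//	// m["S1000"] == true, m["SA1000"] == false,
//	// m["SA1019"] == true, m["ST1003"] == true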
func (prog *Program) Package(path string) *packages.Package {
return prog.packagesMap[path]
}
// Pkg represents a package being linted.
type Pkg struct {
SSA *ssa.Package
*packages.Package
Config config.Config
}
type Positioner interface {
Pos() token.Pos
}
func (prog *Program) DisplayPosition(p token.Pos) token.Position {
// Only use the adjusted position if it points to another Go file.
// This means we'll point to the original file for cgo files, but
// we won't point to a YACC grammar file.
pos := prog.Fset().PositionFor(p, false)
adjPos := prog.Fset().PositionFor(p, true)
if filepath.Ext(adjPos.Filename) == ".go" {
return adjPos
}
return pos
}
func (prog *Program) isGenerated(path string) bool {
// This function isn't very efficient in terms of lock contention
// and lack of parallelism, but it really shouldn't matter.
// Projects consist of thousands of files and have hundreds of
// errors. That's not a lot of calls to isGenerated.
prog.genMu.RLock()
if b, ok := prog.generatedMap[path]; ok {
prog.genMu.RUnlock()
return b
}
prog.genMu.RUnlock()
prog.genMu.Lock()
defer prog.genMu.Unlock()
// recheck to avoid doing extra work in case of race
if b, ok := prog.generatedMap[path]; ok {
return b
}
f, err := os.Open(path)
if err != nil {
return false
}
defer f.Close()
b := isGenerated(f)
prog.generatedMap[path] = b
return b
}
func (j *Job) Errorf(n Positioner, format string, args ...interface{}) *Problem {
tf := j.Program.SSA.Fset.File(n.Pos())
f := j.Program.tokenFileMap[tf]
pkg := j.Program.astFileMap[f]
pos := j.Program.DisplayPosition(n.Pos())
if j.Program.isGenerated(pos.Filename) && j.check.FilterGenerated {
return nil
}
problem := Problem{
Position: pos,
Text: fmt.Sprintf(format, args...),
Check: j.check.ID,
Checker: j.checker,
Package: pkg,
}
j.problems = append(j.problems, problem)
return &j.problems[len(j.problems)-1]
}
func (j *Job) NodePackage(node Positioner) *Pkg {
f := j.File(node)
return j.Program.astFileMap[f]
}
// TODO(dh): replace with packages.Visit
func allPackages(pkgs []*packages.Package) []*packages.Package {
all := map[*packages.Package]bool{}
var wl []*packages.Package
wl = append(wl, pkgs...)
for len(wl) > 0 {
pkg := wl[len(wl)-1]
wl = wl[:len(wl)-1]
if all[pkg] {
continue
}
all[pkg] = true
for _, imp := range pkg.Imports {
wl = append(wl, imp)
}
}
out := make([]*packages.Package, 0, len(all))
for pkg := range all {
out = append(out, pkg)
}
return out
}


@ -1,301 +0,0 @@
// Package lintdsl provides helpers for implementing static analysis
// checks. Dot-importing this package is encouraged.
package lintdsl
import (
"bytes"
"fmt"
"go/ast"
"go/constant"
"go/printer"
"go/token"
"go/types"
"strings"
"honnef.co/go/tools/lint"
"honnef.co/go/tools/ssa"
)
type packager interface {
Package() *ssa.Package
}
func CallName(call *ssa.CallCommon) string {
if call.IsInvoke() {
return ""
}
switch v := call.Value.(type) {
case *ssa.Function:
fn, ok := v.Object().(*types.Func)
if !ok {
return ""
}
return fn.FullName()
case *ssa.Builtin:
return v.Name()
}
return ""
}
func IsCallTo(call *ssa.CallCommon, name string) bool { return CallName(call) == name }
func IsType(T types.Type, name string) bool { return types.TypeString(T, nil) == name }
func FilterDebug(instr []ssa.Instruction) []ssa.Instruction {
var out []ssa.Instruction
for _, ins := range instr {
if _, ok := ins.(*ssa.DebugRef); !ok {
out = append(out, ins)
}
}
return out
}
func IsExample(fn *ssa.Function) bool {
if !strings.HasPrefix(fn.Name(), "Example") {
return false
}
f := fn.Prog.Fset.File(fn.Pos())
if f == nil {
return false
}
return strings.HasSuffix(f.Name(), "_test.go")
}
func IsPointerLike(T types.Type) bool {
switch T := T.Underlying().(type) {
case *types.Interface, *types.Chan, *types.Map, *types.Pointer:
return true
case *types.Basic:
return T.Kind() == types.UnsafePointer
}
return false
}
func IsGenerated(f *ast.File) bool {
comments := f.Comments
if len(comments) > 0 {
comment := comments[0].Text()
return strings.Contains(comment, "Code generated by") ||
strings.Contains(comment, "DO NOT EDIT")
}
return false
}
func IsIdent(expr ast.Expr, ident string) bool {
id, ok := expr.(*ast.Ident)
return ok && id.Name == ident
}
// isBlank returns whether id is the blank identifier "_".
// If id == nil, the answer is false.
func IsBlank(id ast.Expr) bool {
ident, _ := id.(*ast.Ident)
return ident != nil && ident.Name == "_"
}
func IsIntLiteral(expr ast.Expr, literal string) bool {
lit, ok := expr.(*ast.BasicLit)
return ok && lit.Kind == token.INT && lit.Value == literal
}
// Deprecated: use IsIntLiteral instead
func IsZero(expr ast.Expr) bool {
return IsIntLiteral(expr, "0")
}
func TypeOf(j *lint.Job, expr ast.Expr) types.Type {
if expr == nil {
return nil
}
return j.NodePackage(expr).TypesInfo.TypeOf(expr)
}
func IsOfType(j *lint.Job, expr ast.Expr, name string) bool { return IsType(TypeOf(j, expr), name) }
func ObjectOf(j *lint.Job, ident *ast.Ident) types.Object {
if ident == nil {
return nil
}
return j.NodePackage(ident).TypesInfo.ObjectOf(ident)
}
func IsInTest(j *lint.Job, node lint.Positioner) bool {
// FIXME(dh): this doesn't work for global variables with
// initializers
f := j.Program.SSA.Fset.File(node.Pos())
return f != nil && strings.HasSuffix(f.Name(), "_test.go")
}
func IsInMain(j *lint.Job, node lint.Positioner) bool {
if node, ok := node.(packager); ok {
return node.Package().Pkg.Name() == "main"
}
pkg := j.NodePackage(node)
if pkg == nil {
return false
}
return pkg.Types.Name() == "main"
}
func SelectorName(j *lint.Job, expr *ast.SelectorExpr) string {
info := j.NodePackage(expr).TypesInfo
sel := info.Selections[expr]
if sel == nil {
if x, ok := expr.X.(*ast.Ident); ok {
pkg, ok := info.ObjectOf(x).(*types.PkgName)
if !ok {
// This shouldn't happen
return fmt.Sprintf("%s.%s", x.Name, expr.Sel.Name)
}
return fmt.Sprintf("%s.%s", pkg.Imported().Path(), expr.Sel.Name)
}
panic(fmt.Sprintf("unsupported selector: %v", expr))
}
return fmt.Sprintf("(%s).%s", sel.Recv(), sel.Obj().Name())
}
func IsNil(j *lint.Job, expr ast.Expr) bool {
return j.NodePackage(expr).TypesInfo.Types[expr].IsNil()
}
func BoolConst(j *lint.Job, expr ast.Expr) bool {
val := j.NodePackage(expr).TypesInfo.ObjectOf(expr.(*ast.Ident)).(*types.Const).Val()
return constant.BoolVal(val)
}
func IsBoolConst(j *lint.Job, expr ast.Expr) bool {
// We explicitly don't support typed bools because more often than
// not, custom bool types are used as binary enums and the
// explicit comparison is desired.
ident, ok := expr.(*ast.Ident)
if !ok {
return false
}
obj := j.NodePackage(expr).TypesInfo.ObjectOf(ident)
c, ok := obj.(*types.Const)
if !ok {
return false
}
basic, ok := c.Type().(*types.Basic)
if !ok {
return false
}
if basic.Kind() != types.UntypedBool && basic.Kind() != types.Bool {
return false
}
return true
}
func ExprToInt(j *lint.Job, expr ast.Expr) (int64, bool) {
tv := j.NodePackage(expr).TypesInfo.Types[expr]
if tv.Value == nil {
return 0, false
}
if tv.Value.Kind() != constant.Int {
return 0, false
}
return constant.Int64Val(tv.Value)
}
func ExprToString(j *lint.Job, expr ast.Expr) (string, bool) {
val := j.NodePackage(expr).TypesInfo.Types[expr].Value
if val == nil {
return "", false
}
if val.Kind() != constant.String {
return "", false
}
return constant.StringVal(val), true
}
// Dereference returns a pointer's element type; otherwise it returns
// T.
func Dereference(T types.Type) types.Type {
if p, ok := T.Underlying().(*types.Pointer); ok {
return p.Elem()
}
return T
}
// DereferenceR returns a pointer's element type; otherwise it returns
// T. If the element type is itself a pointer, DereferenceR will be
// applied recursively.
func DereferenceR(T types.Type) types.Type {
if p, ok := T.Underlying().(*types.Pointer); ok {
return DereferenceR(p.Elem())
}
return T
}
func IsGoVersion(j *lint.Job, minor int) bool {
return j.Program.GoVersion >= minor
}
func CallNameAST(j *lint.Job, call *ast.CallExpr) string {
sel, ok := call.Fun.(*ast.SelectorExpr)
if !ok {
return ""
}
fn, ok := j.NodePackage(call).TypesInfo.ObjectOf(sel.Sel).(*types.Func)
if !ok {
return ""
}
return fn.FullName()
}
func IsCallToAST(j *lint.Job, node ast.Node, name string) bool {
call, ok := node.(*ast.CallExpr)
if !ok {
return false
}
return CallNameAST(j, call) == name
}
func IsCallToAnyAST(j *lint.Job, node ast.Node, names ...string) bool {
for _, name := range names {
if IsCallToAST(j, node, name) {
return true
}
}
return false
}
func Render(j *lint.Job, x interface{}) string {
fset := j.Program.SSA.Fset
var buf bytes.Buffer
if err := printer.Fprint(&buf, fset, x); err != nil {
panic(err)
}
return buf.String()
}
func RenderArgs(j *lint.Job, args []ast.Expr) string {
var ss []string
for _, arg := range args {
ss = append(ss, Render(j, arg))
}
return strings.Join(ss, ", ")
}
func Preamble(f *ast.File) string {
cutoff := f.Package
if f.Doc != nil {
cutoff = f.Doc.Pos()
}
var out []string
for _, cmt := range f.Comments {
if cmt.Pos() >= cutoff {
break
}
out = append(out, cmt.Text())
}
return strings.Join(out, "\n")
}
func Inspect(node ast.Node, fn func(node ast.Node) bool) {
if node == nil {
return
}
ast.Inspect(node, fn)
}


@ -1,124 +0,0 @@
// Package format provides formatters for linter problems.
package format
import (
"encoding/json"
"fmt"
"go/token"
"io"
"os"
"path/filepath"
"text/tabwriter"
"honnef.co/go/tools/lint"
)
func shortPath(path string) string {
cwd, err := os.Getwd()
if err != nil {
return path
}
if rel, err := filepath.Rel(cwd, path); err == nil && len(rel) < len(path) {
return rel
}
return path
}
func relativePositionString(pos token.Position) string {
s := shortPath(pos.Filename)
if pos.IsValid() {
if s != "" {
s += ":"
}
s += fmt.Sprintf("%d:%d", pos.Line, pos.Column)
}
if s == "" {
s = "-"
}
return s
}
type Statter interface {
Stats(total, errors, warnings int)
}
type Formatter interface {
Format(p lint.Problem)
}
type Text struct {
W io.Writer
}
func (o Text) Format(p lint.Problem) {
fmt.Fprintf(o.W, "%v: %s\n", relativePositionString(p.Position), p.String())
}
type JSON struct {
W io.Writer
}
func severity(s lint.Severity) string {
switch s {
case lint.Error:
return "error"
case lint.Warning:
return "warning"
case lint.Ignored:
return "ignored"
}
return ""
}
func (o JSON) Format(p lint.Problem) {
type location struct {
File string `json:"file"`
Line int `json:"line"`
Column int `json:"column"`
}
jp := struct {
Code string `json:"code"`
Severity string `json:"severity,omitempty"`
Location location `json:"location"`
Message string `json:"message"`
}{
Code: p.Check,
Severity: severity(p.Severity),
Location: location{
File: p.Position.Filename,
Line: p.Position.Line,
Column: p.Position.Column,
},
Message: p.Text,
}
_ = json.NewEncoder(o.W).Encode(jp)
}
type Stylish struct {
W io.Writer
prevFile string
tw *tabwriter.Writer
}
func (o *Stylish) Format(p lint.Problem) {
if p.Position.Filename != o.prevFile {
if o.prevFile != "" {
o.tw.Flush()
fmt.Fprintln(o.W)
}
fmt.Fprintln(o.W, p.Position.Filename)
o.prevFile = p.Position.Filename
o.tw = tabwriter.NewWriter(o.W, 0, 4, 2, ' ', 0)
}
fmt.Fprintf(o.tw, " (%d, %d)\t%s\t%s\n", p.Position.Line, p.Position.Column, p.Check, p.Text)
}
func (o *Stylish) Stats(total, errors, warnings int) {
if o.tw != nil {
o.tw.Flush()
fmt.Fprintln(o.W)
}
fmt.Fprintf(o.W, " ✖ %d problems (%d errors, %d warnings)\n",
total, errors, warnings)
}
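// Illustrative Stylish output for a single problem followed by the
// Stats line (exact column widths depend on the tabwriter):
//
//	main.go
//	  (12, 2)  SA4006  this value of err is never used
//
//	 ✖ 1 problems (1 errors, 0 warnings)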


@ -1,362 +0,0 @@
// Copyright (c) 2013 The Go Authors. All rights reserved.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file or at
// https://developers.google.com/open-source/licenses/bsd.
// Package lintutil provides helpers for writing linter command lines.
package lintutil // import "honnef.co/go/tools/lint/lintutil"
import (
"errors"
"flag"
"fmt"
"go/build"
"go/token"
"log"
"os"
"regexp"
"runtime"
"runtime/pprof"
"strconv"
"strings"
"time"
"honnef.co/go/tools/config"
"honnef.co/go/tools/lint"
"honnef.co/go/tools/lint/lintutil/format"
"honnef.co/go/tools/version"
"golang.org/x/tools/go/packages"
)
func usage(name string, flags *flag.FlagSet) func() {
return func() {
fmt.Fprintf(os.Stderr, "Usage of %s:\n", name)
fmt.Fprintf(os.Stderr, "\t%s [flags] # runs on package in current directory\n", name)
fmt.Fprintf(os.Stderr, "\t%s [flags] packages\n", name)
fmt.Fprintf(os.Stderr, "\t%s [flags] directory\n", name)
fmt.Fprintf(os.Stderr, "\t%s [flags] files... # must be a single package\n", name)
fmt.Fprintf(os.Stderr, "Flags:\n")
flags.PrintDefaults()
}
}
func parseIgnore(s string) ([]lint.Ignore, error) {
var out []lint.Ignore
if len(s) == 0 {
return nil, nil
}
for _, part := range strings.Fields(s) {
p := strings.Split(part, ":")
if len(p) != 2 {
return nil, errors.New("malformed ignore string")
}
path := p[0]
checks := strings.Split(p[1], ",")
out = append(out, &lint.GlobIgnore{Pattern: path, Checks: checks})
}
return out, nil
}
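// Illustrative example: the deprecated -ignore flag takes
// space-separated "glob:checks" pairs, each becoming a GlobIgnore.
//
//	igs, _ := parseIgnore("example.com/pkg:S1000,ST1003 example.com/gen/*:SA1019")
//	// igs[0] ignores S1000 and ST1003 in example.com/pkg;
//	// igs[1] ignores SA1019 anywhere under example.com/gen.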
type versionFlag int
func (v *versionFlag) String() string {
return fmt.Sprintf("1.%d", *v)
}
func (v *versionFlag) Set(s string) error {
if len(s) < 3 {
return errors.New("invalid Go version")
}
if s[0] != '1' {
return errors.New("invalid Go version")
}
if s[1] != '.' {
return errors.New("invalid Go version")
}
i, err := strconv.Atoi(s[2:])
*v = versionFlag(i)
return err
}
func (v *versionFlag) Get() interface{} {
return int(*v)
}
type list []string
func (list *list) String() string {
return `"` + strings.Join(*list, ",") + `"`
}
func (list *list) Set(s string) error {
if s == "" {
*list = nil
return nil
}
*list = strings.Split(s, ",")
return nil
}
func FlagSet(name string) *flag.FlagSet {
flags := flag.NewFlagSet("", flag.ExitOnError)
flags.Usage = usage(name, flags)
flags.String("tags", "", "List of `build tags`")
flags.String("ignore", "", "Deprecated: use linter directives instead")
flags.Bool("tests", true, "Include tests")
flags.Bool("version", false, "Print version and exit")
flags.Bool("show-ignored", false, "Don't filter ignored problems")
flags.String("f", "text", "Output `format` (valid choices are 'stylish', 'text' and 'json')")
flags.Int("debug.max-concurrent-jobs", 0, "Number of jobs to run concurrently")
flags.Bool("debug.print-stats", false, "Print debug statistics")
flags.String("debug.cpuprofile", "", "Write CPU profile to `file`")
flags.String("debug.memprofile", "", "Write memory profile to `file`")
checks := list{"inherit"}
fail := list{"all"}
flags.Var(&checks, "checks", "Comma-separated list of `checks` to enable.")
flags.Var(&fail, "fail", "Comma-separated list of `checks` that can cause a non-zero exit status.")
tags := build.Default.ReleaseTags
v := tags[len(tags)-1][2:]
version := new(versionFlag)
if err := version.Set(v); err != nil {
panic(fmt.Sprintf("internal error: %s", err))
}
flags.Var(version, "go", "Target Go `version` in the format '1.x'")
return flags
}
func ProcessFlagSet(cs []lint.Checker, fs *flag.FlagSet) {
tags := fs.Lookup("tags").Value.(flag.Getter).Get().(string)
ignore := fs.Lookup("ignore").Value.(flag.Getter).Get().(string)
tests := fs.Lookup("tests").Value.(flag.Getter).Get().(bool)
goVersion := fs.Lookup("go").Value.(flag.Getter).Get().(int)
formatter := fs.Lookup("f").Value.(flag.Getter).Get().(string)
printVersion := fs.Lookup("version").Value.(flag.Getter).Get().(bool)
showIgnored := fs.Lookup("show-ignored").Value.(flag.Getter).Get().(bool)
maxConcurrentJobs := fs.Lookup("debug.max-concurrent-jobs").Value.(flag.Getter).Get().(int)
printStats := fs.Lookup("debug.print-stats").Value.(flag.Getter).Get().(bool)
cpuProfile := fs.Lookup("debug.cpuprofile").Value.(flag.Getter).Get().(string)
memProfile := fs.Lookup("debug.memprofile").Value.(flag.Getter).Get().(string)
cfg := config.Config{}
cfg.Checks = *fs.Lookup("checks").Value.(*list)
exit := func(code int) {
if cpuProfile != "" {
pprof.StopCPUProfile()
}
if memProfile != "" {
f, err := os.Create(memProfile)
if err != nil {
panic(err)
}
runtime.GC()
pprof.WriteHeapProfile(f)
}
os.Exit(code)
}
if cpuProfile != "" {
f, err := os.Create(cpuProfile)
if err != nil {
log.Fatal(err)
}
pprof.StartCPUProfile(f)
}
if printVersion {
version.Print()
exit(0)
}
ps, err := Lint(cs, fs.Args(), &Options{
Tags: strings.Fields(tags),
LintTests: tests,
Ignores: ignore,
GoVersion: goVersion,
ReturnIgnored: showIgnored,
Config: cfg,
MaxConcurrentJobs: maxConcurrentJobs,
PrintStats: printStats,
})
if err != nil {
fmt.Fprintln(os.Stderr, err)
exit(1)
}
var f format.Formatter
switch formatter {
case "text":
f = format.Text{W: os.Stdout}
case "stylish":
f = &format.Stylish{W: os.Stdout}
case "json":
f = format.JSON{W: os.Stdout}
default:
fmt.Fprintf(os.Stderr, "unsupported output format %q\n", formatter)
exit(2)
}
var (
total int
errors int
warnings int
)
fail := *fs.Lookup("fail").Value.(*list)
var allChecks []string
for _, p := range ps {
allChecks = append(allChecks, p.Check)
}
shouldExit := lint.FilterChecks(allChecks, fail)
total = len(ps)
for _, p := range ps {
if shouldExit[p.Check] {
errors++
} else {
p.Severity = lint.Warning
warnings++
}
f.Format(p)
}
if f, ok := f.(format.Statter); ok {
f.Stats(total, errors, warnings)
}
if errors > 0 {
exit(1)
}
}
type Options struct {
Config config.Config
Tags []string
LintTests bool
Ignores string
GoVersion int
ReturnIgnored bool
MaxConcurrentJobs int
PrintStats bool
}
func Lint(cs []lint.Checker, paths []string, opt *Options) ([]lint.Problem, error) {
stats := lint.PerfStats{
CheckerInits: map[string]time.Duration{},
}
if opt == nil {
opt = &Options{}
}
ignores, err := parseIgnore(opt.Ignores)
if err != nil {
return nil, err
}
ctx := build.Default
// XXX nothing cares about build tags right now
ctx.BuildTags = opt.Tags
conf := &packages.Config{
Mode: packages.LoadAllSyntax,
Tests: opt.LintTests,
}
t := time.Now()
if len(paths) == 0 {
paths = []string{"."}
}
pkgs, err := packages.Load(conf, paths...)
if err != nil {
return nil, err
}
stats.PackageLoading = time.Since(t)
var problems []lint.Problem
workingPkgs := make([]*packages.Package, 0, len(pkgs))
for _, pkg := range pkgs {
if pkg.IllTyped {
problems = append(problems, compileErrors(pkg)...)
} else {
workingPkgs = append(workingPkgs, pkg)
}
}
if len(workingPkgs) == 0 {
return problems, nil
}
l := &lint.Linter{
Checkers: cs,
Ignores: ignores,
GoVersion: opt.GoVersion,
ReturnIgnored: opt.ReturnIgnored,
Config: opt.Config,
MaxConcurrentJobs: opt.MaxConcurrentJobs,
PrintStats: opt.PrintStats,
}
problems = append(problems, l.Lint(workingPkgs, &stats)...)
return problems, nil
}
var posRe = regexp.MustCompile(`^(.+?):(\d+):(\d+)?$`)
func parsePos(pos string) token.Position {
if pos == "-" || pos == "" {
return token.Position{}
}
parts := posRe.FindStringSubmatch(pos)
if parts == nil {
panic(fmt.Sprintf("internal error: malformed position %q", pos))
}
file := parts[1]
line, _ := strconv.Atoi(parts[2])
col, _ := strconv.Atoi(parts[3])
return token.Position{
Filename: file,
Line: line,
Column: col,
}
}
func compileErrors(pkg *packages.Package) []lint.Problem {
if !pkg.IllTyped {
return nil
}
if len(pkg.Errors) == 0 {
// transitively ill-typed
var ps []lint.Problem
for _, imp := range pkg.Imports {
ps = append(ps, compileErrors(imp)...)
}
return ps
}
var ps []lint.Problem
for _, err := range pkg.Errors {
p := lint.Problem{
Position: parsePos(err.Pos),
Text: err.Msg,
Checker: "compiler",
Check: "compile",
}
ps = append(ps, p)
}
return ps
}
func ProcessArgs(name string, cs []lint.Checker, args []string) {
flags := FlagSet(name)
flags.Parse(args)
ProcessFlagSet(cs, flags)
}


@ -1,15 +0,0 @@
# Contributing to gosimple
## Before filing an issue:
### Are you having trouble building gosimple?
Check you have the latest version of its dependencies. Run
```
go get -u honnef.co/go/tools/simple
```
If you still have problems, consider searching for existing issues before filing a new issue.
## Before sending a pull request:
Have you understood the purpose of gosimple? Make sure to carefully read the `README`.

File diff suppressed because it is too large

View file

@@ -1,28 +0,0 @@
Copyright (c) 2009 The Go Authors. All rights reserved.
Copyright (c) 2016 Dominik Honnef. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View file

@@ -1,195 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ssa
// Simple block optimizations to simplify the control flow graph.
// TODO(adonovan): opt: instead of creating several "unreachable" blocks
// per function in the Builder, reuse a single one (e.g. at Blocks[1])
// to reduce garbage.
import (
"fmt"
"os"
)
// If true, perform sanity checking and show progress at each
// successive iteration of optimizeBlocks. Very verbose.
const debugBlockOpt = false
// markReachable sets Index=-1 for all blocks reachable from b.
func markReachable(b *BasicBlock) {
b.Index = -1
for _, succ := range b.Succs {
if succ.Index == 0 {
markReachable(succ)
}
}
}
func DeleteUnreachableBlocks(f *Function) {
deleteUnreachableBlocks(f)
}
// deleteUnreachableBlocks marks all reachable blocks of f and
// eliminates (nils) all others, including possibly cyclic subgraphs.
//
func deleteUnreachableBlocks(f *Function) {
const white, black = 0, -1
// We borrow b.Index temporarily as the mark bit.
for _, b := range f.Blocks {
b.Index = white
}
markReachable(f.Blocks[0])
if f.Recover != nil {
markReachable(f.Recover)
}
for i, b := range f.Blocks {
if b.Index == white {
for _, c := range b.Succs {
if c.Index == black {
c.removePred(b) // delete white->black edge
}
}
if debugBlockOpt {
fmt.Fprintln(os.Stderr, "unreachable", b)
}
f.Blocks[i] = nil // delete b
}
}
f.removeNilBlocks()
}
// jumpThreading attempts to apply simple jump-threading to block b,
// in which a->b->c becomes a->c if b is just a Jump.
// The result is true if the optimization was applied.
//
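// Illustration (added; not in the original comment): with b consisting
// of a single Jump,
//
//	a → b → c    becomes    a → c
//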
func jumpThreading(f *Function, b *BasicBlock) bool {
if b.Index == 0 {
return false // don't apply to entry block
}
if b.Instrs == nil {
return false
}
if _, ok := b.Instrs[0].(*Jump); !ok {
return false // not just a jump
}
c := b.Succs[0]
if c == b {
return false // don't apply to degenerate jump-to-self.
}
if c.hasPhi() {
return false // not sound without more effort
}
for j, a := range b.Preds {
a.replaceSucc(b, c)
// If a now has two edges to c, replace its degenerate If by Jump.
if len(a.Succs) == 2 && a.Succs[0] == c && a.Succs[1] == c {
jump := new(Jump)
jump.setBlock(a)
a.Instrs[len(a.Instrs)-1] = jump
a.Succs = a.Succs[:1]
c.removePred(b)
} else {
if j == 0 {
c.replacePred(b, a)
} else {
c.Preds = append(c.Preds, a)
}
}
if debugBlockOpt {
fmt.Fprintln(os.Stderr, "jumpThreading", a, b, c)
}
}
f.Blocks[b.Index] = nil // delete b
return true
}
// fuseBlocks attempts to apply the block fusion optimization to block
// a, in which a->b becomes ab if len(a.Succs)==len(b.Preds)==1.
// The result is true if the optimization was applied.
//
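// Illustration (added; not in the original comment):
//
//	a → b    becomes    ab
//
// where ab carries a's instructions (minus the trailing jump) followed
// by b's, and inherits b's successors.
//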
func fuseBlocks(f *Function, a *BasicBlock) bool {
if len(a.Succs) != 1 {
return false
}
b := a.Succs[0]
if len(b.Preds) != 1 {
return false
}
// Degenerate &&/|| ops may result in a straight-line CFG
// containing φ-nodes. (Ideally we'd replace them with
// their sole operand but that requires Referrers, built later.)
if b.hasPhi() {
return false // not sound without further effort
}
// Eliminate jump at end of A, then copy all of B across.
a.Instrs = append(a.Instrs[:len(a.Instrs)-1], b.Instrs...)
for _, instr := range b.Instrs {
instr.setBlock(a)
}
// A inherits B's successors
a.Succs = append(a.succs2[:0], b.Succs...)
// Fix up Preds links of all successors of B.
for _, c := range b.Succs {
c.replacePred(b, a)
}
if debugBlockOpt {
fmt.Fprintln(os.Stderr, "fuseBlocks", a, b)
}
f.Blocks[b.Index] = nil // delete b
return true
}
func OptimizeBlocks(f *Function) {
optimizeBlocks(f)
}
// optimizeBlocks() performs some simple block optimizations on a
// completed function: dead block elimination, block fusion, jump
// threading.
//
func optimizeBlocks(f *Function) {
deleteUnreachableBlocks(f)
// Loop until no further progress.
changed := true
for changed {
changed = false
if debugBlockOpt {
f.WriteTo(os.Stderr)
mustSanityCheck(f, nil)
}
for _, b := range f.Blocks {
// f.Blocks will temporarily contain nils to indicate
// deleted blocks; we remove them at the end.
if b == nil {
continue
}
// Fuse blocks. b->c becomes bc.
if fuseBlocks(f, b) {
changed = true
}
// a->b->c becomes a->c if b contains only a Jump.
if jumpThreading(f, b) {
changed = true
continue // (b was disconnected)
}
}
}
f.removeNilBlocks()
}

File diff suppressed because it is too large

View file

@@ -1,169 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ssa
// This file defines the Const SSA value type.
import (
"fmt"
exact "go/constant"
"go/token"
"go/types"
"strconv"
)
// NewConst returns a new constant of the specified value and type.
// val must be valid according to the specification of Const.Value.
//
func NewConst(val exact.Value, typ types.Type) *Const {
return &Const{typ, val}
}
// intConst returns an 'int' constant that evaluates to i.
// (i is an int64 in case the host is narrower than the target.)
func intConst(i int64) *Const {
return NewConst(exact.MakeInt64(i), tInt)
}
// nilConst returns a nil constant of the specified type, which may
// be any reference type, including interfaces.
//
func nilConst(typ types.Type) *Const {
return NewConst(nil, typ)
}
// stringConst returns a 'string' constant that evaluates to s.
func stringConst(s string) *Const {
return NewConst(exact.MakeString(s), tString)
}
// zeroConst returns a new "zero" constant of the specified type,
// which must not be an array or struct type: the zero values of
// aggregates are well-defined but cannot be represented by Const.
//
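// For example (added; derived from the switch below): zeroConst of int
// yields 0:int, of string yields "":string, and of any pointer, slice,
// interface, chan, map or signature type yields a nil constant of that
// type.
//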
func zeroConst(t types.Type) *Const {
switch t := t.(type) {
case *types.Basic:
switch {
case t.Info()&types.IsBoolean != 0:
return NewConst(exact.MakeBool(false), t)
case t.Info()&types.IsNumeric != 0:
return NewConst(exact.MakeInt64(0), t)
case t.Info()&types.IsString != 0:
return NewConst(exact.MakeString(""), t)
case t.Kind() == types.UnsafePointer:
fallthrough
case t.Kind() == types.UntypedNil:
return nilConst(t)
default:
panic(fmt.Sprint("zeroConst for unexpected type:", t))
}
case *types.Pointer, *types.Slice, *types.Interface, *types.Chan, *types.Map, *types.Signature:
return nilConst(t)
case *types.Named:
return NewConst(zeroConst(t.Underlying()).Value, t)
case *types.Array, *types.Struct, *types.Tuple:
panic(fmt.Sprint("zeroConst applied to aggregate:", t))
}
panic(fmt.Sprint("zeroConst: unexpected ", t))
}
func (c *Const) RelString(from *types.Package) string {
var s string
if c.Value == nil {
s = "nil"
} else if c.Value.Kind() == exact.String {
s = exact.StringVal(c.Value)
const max = 20
// TODO(adonovan): don't cut a rune in half.
if len(s) > max {
s = s[:max-3] + "..." // abbreviate
}
s = strconv.Quote(s)
} else {
s = c.Value.String()
}
return s + ":" + relType(c.Type(), from)
}
func (c *Const) Name() string {
return c.RelString(nil)
}
func (c *Const) String() string {
return c.Name()
}
func (c *Const) Type() types.Type {
return c.typ
}
func (c *Const) Referrers() *[]Instruction {
return nil
}
func (c *Const) Parent() *Function { return nil }
func (c *Const) Pos() token.Pos {
return token.NoPos
}
// IsNil returns true if this constant represents a typed or untyped nil value.
func (c *Const) IsNil() bool {
return c.Value == nil
}
// TODO(adonovan): move everything below into honnef.co/go/tools/ssa/interp.
// Int64 returns the numeric value of this constant truncated to fit
// a signed 64-bit integer.
//
func (c *Const) Int64() int64 {
switch x := exact.ToInt(c.Value); x.Kind() {
case exact.Int:
if i, ok := exact.Int64Val(x); ok {
return i
}
return 0
case exact.Float:
f, _ := exact.Float64Val(x)
return int64(f)
}
panic(fmt.Sprintf("unexpected constant value: %T", c.Value))
}
// Uint64 returns the numeric value of this constant truncated to fit
// an unsigned 64-bit integer.
//
func (c *Const) Uint64() uint64 {
switch x := exact.ToInt(c.Value); x.Kind() {
case exact.Int:
if u, ok := exact.Uint64Val(x); ok {
return u
}
return 0
case exact.Float:
f, _ := exact.Float64Val(x)
return uint64(f)
}
panic(fmt.Sprintf("unexpected constant value: %T", c.Value))
}
// Float64 returns the numeric value of this constant truncated to fit
// a float64.
//
func (c *Const) Float64() float64 {
f, _ := exact.Float64Val(c.Value)
return f
}
// Complex128 returns the complex value of this constant truncated to
// fit a complex128.
//
func (c *Const) Complex128() complex128 {
re, _ := exact.Float64Val(exact.Real(c.Value))
im, _ := exact.Float64Val(exact.Imag(c.Value))
return complex(re, im)
}
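// Usage sketch (added; illustrative only, from within package ssa):
//
//	c := intConst(42)
//	_ = c.Int64()        // 42
//	_ = c.RelString(nil) // "42:int"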

View file

@@ -1,263 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ssa
// This file implements the CREATE phase of SSA construction.
// See builder.go for explanation.
import (
"fmt"
"go/ast"
"go/token"
"go/types"
"os"
"sync"
"golang.org/x/tools/go/types/typeutil"
)
// NewProgram returns a new SSA Program.
//
// mode controls diagnostics and checking during SSA construction.
//
func NewProgram(fset *token.FileSet, mode BuilderMode) *Program {
prog := &Program{
Fset: fset,
imported: make(map[string]*Package),
packages: make(map[*types.Package]*Package),
thunks: make(map[selectionKey]*Function),
bounds: make(map[*types.Func]*Function),
mode: mode,
}
h := typeutil.MakeHasher() // protected by methodsMu, in effect
prog.methodSets.SetHasher(h)
prog.canon.SetHasher(h)
return prog
}
// memberFromObject populates package pkg with a member for the
// typechecker object obj.
//
// For objects from Go source code, syntax is the associated syntax
// tree (for funcs and vars only); it will be used during the build
// phase.
//
func memberFromObject(pkg *Package, obj types.Object, syntax ast.Node) {
name := obj.Name()
switch obj := obj.(type) {
case *types.Builtin:
if pkg.Pkg != types.Unsafe {
panic("unexpected builtin object: " + obj.String())
}
case *types.TypeName:
pkg.Members[name] = &Type{
object: obj,
pkg: pkg,
}
case *types.Const:
c := &NamedConst{
object: obj,
Value: NewConst(obj.Val(), obj.Type()),
pkg: pkg,
}
pkg.values[obj] = c.Value
pkg.Members[name] = c
case *types.Var:
g := &Global{
Pkg: pkg,
name: name,
object: obj,
typ: types.NewPointer(obj.Type()), // address
pos: obj.Pos(),
}
pkg.values[obj] = g
pkg.Members[name] = g
case *types.Func:
sig := obj.Type().(*types.Signature)
if sig.Recv() == nil && name == "init" {
pkg.ninit++
name = fmt.Sprintf("init#%d", pkg.ninit)
}
fn := &Function{
name: name,
object: obj,
Signature: sig,
syntax: syntax,
pos: obj.Pos(),
Pkg: pkg,
Prog: pkg.Prog,
}
if syntax == nil {
fn.Synthetic = "loaded from gc object file"
}
pkg.values[obj] = fn
if sig.Recv() == nil {
pkg.Members[name] = fn // package-level function
}
default: // (incl. *types.Package)
panic("unexpected Object type: " + obj.String())
}
}
// membersFromDecl populates package pkg with members for each
// typechecker object (var, func, const or type) associated with the
// specified decl.
//
func membersFromDecl(pkg *Package, decl ast.Decl) {
switch decl := decl.(type) {
case *ast.GenDecl: // import, const, type or var
switch decl.Tok {
case token.CONST:
for _, spec := range decl.Specs {
for _, id := range spec.(*ast.ValueSpec).Names {
if !isBlankIdent(id) {
memberFromObject(pkg, pkg.info.Defs[id], nil)
}
}
}
case token.VAR:
for _, spec := range decl.Specs {
for _, id := range spec.(*ast.ValueSpec).Names {
if !isBlankIdent(id) {
memberFromObject(pkg, pkg.info.Defs[id], spec)
}
}
}
case token.TYPE:
for _, spec := range decl.Specs {
id := spec.(*ast.TypeSpec).Name
if !isBlankIdent(id) {
memberFromObject(pkg, pkg.info.Defs[id], nil)
}
}
}
case *ast.FuncDecl:
id := decl.Name
if !isBlankIdent(id) {
memberFromObject(pkg, pkg.info.Defs[id], decl)
}
}
}
// CreatePackage constructs and returns an SSA Package from the
// specified type-checked, error-free file ASTs, and populates its
// Members mapping.
//
// importable determines whether this package should be returned by a
// subsequent call to ImportedPackage(pkg.Path()).
//
// The real work of building SSA form for each function is not done
// until a subsequent call to Package.Build().
//
func (prog *Program) CreatePackage(pkg *types.Package, files []*ast.File, info *types.Info, importable bool) *Package {
p := &Package{
Prog: prog,
Members: make(map[string]Member),
values: make(map[types.Object]Value),
Pkg: pkg,
info: info, // transient (CREATE and BUILD phases)
files: files, // transient (CREATE and BUILD phases)
}
// Add init() function.
p.init = &Function{
name: "init",
Signature: new(types.Signature),
Synthetic: "package initializer",
Pkg: p,
Prog: prog,
}
p.Members[p.init.name] = p.init
// CREATE phase.
// Allocate all package members: vars, funcs, consts and types.
if len(files) > 0 {
// Go source package.
for _, file := range files {
for _, decl := range file.Decls {
membersFromDecl(p, decl)
}
}
} else {
// GC-compiled binary package (or "unsafe")
// No code.
// No position information.
scope := p.Pkg.Scope()
for _, name := range scope.Names() {
obj := scope.Lookup(name)
memberFromObject(p, obj, nil)
if obj, ok := obj.(*types.TypeName); ok {
if named, ok := obj.Type().(*types.Named); ok {
for i, n := 0, named.NumMethods(); i < n; i++ {
memberFromObject(p, named.Method(i), nil)
}
}
}
}
}
if prog.mode&BareInits == 0 {
// Add initializer guard variable.
initguard := &Global{
Pkg: p,
name: "init$guard",
typ: types.NewPointer(tBool),
}
p.Members[initguard.Name()] = initguard
}
if prog.mode&GlobalDebug != 0 {
p.SetDebugMode(true)
}
if prog.mode&PrintPackages != 0 {
printMu.Lock()
p.WriteTo(os.Stdout)
printMu.Unlock()
}
if importable {
prog.imported[p.Pkg.Path()] = p
}
prog.packages[p.Pkg] = p
return p
}
// printMu serializes printing of Packages/Functions to stdout.
var printMu sync.Mutex
// AllPackages returns a new slice containing all packages in the
// program prog in unspecified order.
//
func (prog *Program) AllPackages() []*Package {
pkgs := make([]*Package, 0, len(prog.packages))
for _, pkg := range prog.packages {
pkgs = append(pkgs, pkg)
}
return pkgs
}
// ImportedPackage returns the importable SSA Package whose import
// path is path, or nil if no such SSA package has been created.
//
// Not all packages are importable. For example, no import
// declaration can resolve to the x_test package created by 'go test'
// or the ad-hoc main package created by 'go build foo.go'.
//
func (prog *Program) ImportedPackage(path string) *Package {
return prog.imported[path]
}
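// Putting it together (added; a hedged sketch that assumes pkg, files
// and info come from a completed go/types type-check of one package):
//
//	fset := token.NewFileSet()
//	// ... parse the files and type-check them into pkg, files, info ...
//	prog := ssa.NewProgram(fset, ssa.SanityCheckFunctions)
//	ssapkg := prog.CreatePackage(pkg, files, info, true)
//	ssapkg.Build() // emit SSA for every function body in the package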

View file

@@ -1,123 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package ssa defines a representation of the elements of Go programs
// (packages, types, functions, variables and constants) using a
// static single-assignment (SSA) form intermediate representation
// (IR) for the bodies of functions.
//
// THIS INTERFACE IS EXPERIMENTAL AND IS LIKELY TO CHANGE.
//
// For an introduction to SSA form, see
// http://en.wikipedia.org/wiki/Static_single_assignment_form.
// This page provides a broader reading list:
// http://www.dcs.gla.ac.uk/~jsinger/ssa.html.
//
// The level of abstraction of the SSA form is intentionally close to
// the source language to facilitate construction of source analysis
// tools. It is not intended for machine code generation.
//
// All looping, branching and switching constructs are replaced with
// unstructured control flow. Higher-level control flow constructs
// such as multi-way branch can be reconstructed as needed; see
// ssautil.Switches() for an example.
//
// To construct an SSA-form program, call ssautil.CreateProgram on a
// loader.Program, a set of type-checked packages created from
// parsed Go source files. The resulting ssa.Program contains all the
// packages and their members, but SSA code is not created for
// function bodies until a subsequent call to (*Package).Build.
//
// The builder initially builds a naive SSA form in which all local
// variables are addresses of stack locations with explicit loads and
// stores. Registerisation of eligible locals and φ-node insertion
// using dominance and dataflow are then performed as a second pass
// called "lifting" to improve the accuracy and performance of
// subsequent analyses; this pass can be skipped by setting the
// NaiveForm builder flag.
//
// The primary interfaces of this package are:
//
// - Member: a named member of a Go package.
// - Value: an expression that yields a value.
// - Instruction: a statement that consumes values and performs computation.
// - Node: a Value or Instruction (emphasizing its membership in the SSA value graph)
//
// A computation that yields a result implements both the Value and
// Instruction interfaces. The following table shows for each
// concrete type which of these interfaces it implements.
//
//                      Value?   Instruction? Member?
//   *Alloc             ✔        ✔
//   *BinOp             ✔        ✔
//   *Builtin           ✔
//   *Call              ✔        ✔
//   *ChangeInterface   ✔        ✔
//   *ChangeType        ✔        ✔
//   *Const             ✔
//   *Convert           ✔        ✔
//   *DebugRef                   ✔
//   *Defer                      ✔
//   *Extract           ✔        ✔
//   *Field             ✔        ✔
//   *FieldAddr         ✔        ✔
//   *FreeVar           ✔
//   *Function          ✔                     ✔ (func)
//   *Global            ✔                     ✔ (var)
//   *Go                         ✔
//   *If                         ✔
//   *Index             ✔        ✔
//   *IndexAddr         ✔        ✔
//   *Jump                       ✔
//   *Lookup            ✔        ✔
//   *MakeChan          ✔        ✔
//   *MakeClosure       ✔        ✔
//   *MakeInterface     ✔        ✔
//   *MakeMap           ✔        ✔
//   *MakeSlice         ✔        ✔
//   *MapUpdate                  ✔
//   *NamedConst                              ✔ (const)
//   *Next              ✔        ✔
//   *Panic                      ✔
//   *Parameter         ✔
//   *Phi               ✔        ✔
//   *Range             ✔        ✔
//   *Return                     ✔
//   *RunDefers                  ✔
//   *Select            ✔        ✔
//   *Send                       ✔
//   *Slice             ✔        ✔
//   *Store                      ✔
//   *Type                                    ✔ (type)
//   *TypeAssert        ✔        ✔
//   *UnOp              ✔        ✔
//
// Other key types in this package include: Program, Package, Function
// and BasicBlock.
//
// The program representation constructed by this package is fully
// resolved internally, i.e. it does not rely on the names of Values,
// Packages, Functions, Types or BasicBlocks for the correct
// interpretation of the program. Only the identities of objects and
// the topology of the SSA and type graphs are semantically
// significant. (There is one exception: Ids, used to identify field
// and method names, contain strings.) Avoidance of name-based
// operations simplifies the implementation of subsequent passes and
// can make them very efficient. Many objects are nonetheless named
// to aid in debugging, but it is not essential that the names be
// either accurate or unambiguous. The public API exposes a number of
// name-based maps for client convenience.
//
// The ssa/ssautil package provides various utilities that depend only
// on the public API of this package.
//
// TODO(adonovan): Consider the exceptional control-flow implications
// of defer and recover().
//
// TODO(adonovan): write a how-to document for all the various cases
// of trying to determine corresponding elements across the four
// domains of source locations, ast.Nodes, types.Objects,
// ssa.Values/Instructions.
//
package ssa // import "honnef.co/go/tools/ssa"
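
For orientation, the construction workflow this doc comment describes looks roughly like the sketch below (hedged: it assumes golang.org/x/tools/go/loader and the ssautil.CreateProgram helper mentioned above are available as described).
```
package main

import (
	"fmt"

	"golang.org/x/tools/go/loader"

	"honnef.co/go/tools/ssa"
	"honnef.co/go/tools/ssa/ssautil"
)

func main() {
	var conf loader.Config
	conf.Import("fmt") // type-check fmt and its dependencies
	lprog, err := conf.Load()
	if err != nil {
		panic(err)
	}
	// Create SSA packages for all type-checked packages, then build
	// SSA code for every function body.
	prog := ssautil.CreateProgram(lprog, ssa.SanityCheckFunctions)
	prog.Build()
	for _, pkg := range prog.AllPackages() {
		fmt.Println(pkg.Pkg.Path())
	}
}
```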

Some files were not shown because too many files have changed in this diff