diff --git a/vendor/golang.org/x/net/http2/Dockerfile b/vendor/golang.org/x/net/http2/Dockerfile deleted file mode 100644 index 53fc525797..0000000000 --- a/vendor/golang.org/x/net/http2/Dockerfile +++ /dev/null @@ -1,51 +0,0 @@ -# -# This Dockerfile builds a recent curl with HTTP/2 client support, using -# a recent nghttp2 build. -# -# See the Makefile for how to tag it. If Docker and that image is found, the -# Go tests use this curl binary for integration tests. -# - -FROM ubuntu:trusty - -RUN apt-get update && \ - apt-get upgrade -y && \ - apt-get install -y git-core build-essential wget - -RUN apt-get install -y --no-install-recommends \ - autotools-dev libtool pkg-config zlib1g-dev \ - libcunit1-dev libssl-dev libxml2-dev libevent-dev \ - automake autoconf - -# The list of packages nghttp2 recommends for h2load: -RUN apt-get install -y --no-install-recommends make binutils \ - autoconf automake autotools-dev \ - libtool pkg-config zlib1g-dev libcunit1-dev libssl-dev libxml2-dev \ - libev-dev libevent-dev libjansson-dev libjemalloc-dev \ - cython python3.4-dev python-setuptools - -# Note: setting NGHTTP2_VER before the git clone, so an old git clone isn't cached: -ENV NGHTTP2_VER 895da9a -RUN cd /root && git clone https://github.com/tatsuhiro-t/nghttp2.git - -WORKDIR /root/nghttp2 -RUN git reset --hard $NGHTTP2_VER -RUN autoreconf -i -RUN automake -RUN autoconf -RUN ./configure -RUN make -RUN make install - -WORKDIR /root -RUN wget http://curl.haxx.se/download/curl-7.45.0.tar.gz -RUN tar -zxvf curl-7.45.0.tar.gz -WORKDIR /root/curl-7.45.0 -RUN ./configure --with-ssl --with-nghttp2=/usr/local -RUN make -RUN make install -RUN ldconfig - -CMD ["-h"] -ENTRYPOINT ["/usr/local/bin/curl"] - diff --git a/vendor/golang.org/x/net/http2/Makefile b/vendor/golang.org/x/net/http2/Makefile deleted file mode 100644 index 55fd826f77..0000000000 --- a/vendor/golang.org/x/net/http2/Makefile +++ /dev/null @@ -1,3 +0,0 @@ -curlimage: - docker build -t gohttp2/curl . - diff --git a/vendor/golang.org/x/net/http2/README b/vendor/golang.org/x/net/http2/README deleted file mode 100644 index 360d5aa379..0000000000 --- a/vendor/golang.org/x/net/http2/README +++ /dev/null @@ -1,20 +0,0 @@ -This is a work-in-progress HTTP/2 implementation for Go. - -It will eventually live in the Go standard library and won't require -any changes to your code to use. It will just be automatic. - -Status: - -* The server support is pretty good. A few things are missing - but are being worked on. -* The client work has just started but shares a lot of code - is coming along much quicker. - -Docs are at https://godoc.org/golang.org/x/net/http2 - -Demo test server at https://http2.golang.org/ - -Help & bug reports welcome! - -Contributing: https://golang.org/doc/contribute.html -Bugs: https://golang.org/issue/new?title=x/net/http2:+ diff --git a/vendor/golang.org/x/net/http2/client_conn_pool.go b/vendor/golang.org/x/net/http2/client_conn_pool.go deleted file mode 100644 index b13941258a..0000000000 --- a/vendor/golang.org/x/net/http2/client_conn_pool.go +++ /dev/null @@ -1,256 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Transport code's client connection pooling. - -package http2 - -import ( - "crypto/tls" - "net/http" - "sync" -) - -// ClientConnPool manages a pool of HTTP/2 client connections. 
-type ClientConnPool interface { - GetClientConn(req *http.Request, addr string) (*ClientConn, error) - MarkDead(*ClientConn) -} - -// clientConnPoolIdleCloser is the interface implemented by ClientConnPool -// implementations which can close their idle connections. -type clientConnPoolIdleCloser interface { - ClientConnPool - closeIdleConnections() -} - -var ( - _ clientConnPoolIdleCloser = (*clientConnPool)(nil) - _ clientConnPoolIdleCloser = noDialClientConnPool{} -) - -// TODO: use singleflight for dialing and addConnCalls? -type clientConnPool struct { - t *Transport - - mu sync.Mutex // TODO: maybe switch to RWMutex - // TODO: add support for sharing conns based on cert names - // (e.g. share conn for googleapis.com and appspot.com) - conns map[string][]*ClientConn // key is host:port - dialing map[string]*dialCall // currently in-flight dials - keys map[*ClientConn][]string - addConnCalls map[string]*addConnCall // in-flight addConnIfNeede calls -} - -func (p *clientConnPool) GetClientConn(req *http.Request, addr string) (*ClientConn, error) { - return p.getClientConn(req, addr, dialOnMiss) -} - -const ( - dialOnMiss = true - noDialOnMiss = false -) - -func (p *clientConnPool) getClientConn(req *http.Request, addr string, dialOnMiss bool) (*ClientConn, error) { - if isConnectionCloseRequest(req) && dialOnMiss { - // It gets its own connection. - const singleUse = true - cc, err := p.t.dialClientConn(addr, singleUse) - if err != nil { - return nil, err - } - return cc, nil - } - p.mu.Lock() - for _, cc := range p.conns[addr] { - if cc.CanTakeNewRequest() { - p.mu.Unlock() - return cc, nil - } - } - if !dialOnMiss { - p.mu.Unlock() - return nil, ErrNoCachedConn - } - call := p.getStartDialLocked(addr) - p.mu.Unlock() - <-call.done - return call.res, call.err -} - -// dialCall is an in-flight Transport dial call to a host. -type dialCall struct { - p *clientConnPool - done chan struct{} // closed when done - res *ClientConn // valid after done is closed - err error // valid after done is closed -} - -// requires p.mu is held. -func (p *clientConnPool) getStartDialLocked(addr string) *dialCall { - if call, ok := p.dialing[addr]; ok { - // A dial is already in-flight. Don't start another. - return call - } - call := &dialCall{p: p, done: make(chan struct{})} - if p.dialing == nil { - p.dialing = make(map[string]*dialCall) - } - p.dialing[addr] = call - go call.dial(addr) - return call -} - -// run in its own goroutine. -func (c *dialCall) dial(addr string) { - const singleUse = false // shared conn - c.res, c.err = c.p.t.dialClientConn(addr, singleUse) - close(c.done) - - c.p.mu.Lock() - delete(c.p.dialing, addr) - if c.err == nil { - c.p.addConnLocked(addr, c.res) - } - c.p.mu.Unlock() -} - -// addConnIfNeeded makes a NewClientConn out of c if a connection for key doesn't -// already exist. It coalesces concurrent calls with the same key. -// This is used by the http1 Transport code when it creates a new connection. Because -// the http1 Transport doesn't de-dup TCP dials to outbound hosts (because it doesn't know -// the protocol), it can get into a situation where it has multiple TLS connections. -// This code decides which ones live or die. -// The return value used is whether c was used. -// c is never closed. 
-func (p *clientConnPool) addConnIfNeeded(key string, t *Transport, c *tls.Conn) (used bool, err error) { - p.mu.Lock() - for _, cc := range p.conns[key] { - if cc.CanTakeNewRequest() { - p.mu.Unlock() - return false, nil - } - } - call, dup := p.addConnCalls[key] - if !dup { - if p.addConnCalls == nil { - p.addConnCalls = make(map[string]*addConnCall) - } - call = &addConnCall{ - p: p, - done: make(chan struct{}), - } - p.addConnCalls[key] = call - go call.run(t, key, c) - } - p.mu.Unlock() - - <-call.done - if call.err != nil { - return false, call.err - } - return !dup, nil -} - -type addConnCall struct { - p *clientConnPool - done chan struct{} // closed when done - err error -} - -func (c *addConnCall) run(t *Transport, key string, tc *tls.Conn) { - cc, err := t.NewClientConn(tc) - - p := c.p - p.mu.Lock() - if err != nil { - c.err = err - } else { - p.addConnLocked(key, cc) - } - delete(p.addConnCalls, key) - p.mu.Unlock() - close(c.done) -} - -func (p *clientConnPool) addConn(key string, cc *ClientConn) { - p.mu.Lock() - p.addConnLocked(key, cc) - p.mu.Unlock() -} - -// p.mu must be held -func (p *clientConnPool) addConnLocked(key string, cc *ClientConn) { - for _, v := range p.conns[key] { - if v == cc { - return - } - } - if p.conns == nil { - p.conns = make(map[string][]*ClientConn) - } - if p.keys == nil { - p.keys = make(map[*ClientConn][]string) - } - p.conns[key] = append(p.conns[key], cc) - p.keys[cc] = append(p.keys[cc], key) -} - -func (p *clientConnPool) MarkDead(cc *ClientConn) { - p.mu.Lock() - defer p.mu.Unlock() - for _, key := range p.keys[cc] { - vv, ok := p.conns[key] - if !ok { - continue - } - newList := filterOutClientConn(vv, cc) - if len(newList) > 0 { - p.conns[key] = newList - } else { - delete(p.conns, key) - } - } - delete(p.keys, cc) -} - -func (p *clientConnPool) closeIdleConnections() { - p.mu.Lock() - defer p.mu.Unlock() - // TODO: don't close a cc if it was just added to the pool - // milliseconds ago and has never been used. There's currently - // a small race window with the HTTP/1 Transport's integration - // where it can add an idle conn just before using it, and - // somebody else can concurrently call CloseIdleConns and - // break some caller's RoundTrip. - for _, vv := range p.conns { - for _, cc := range vv { - cc.closeIfIdle() - } - } -} - -func filterOutClientConn(in []*ClientConn, exclude *ClientConn) []*ClientConn { - out := in[:0] - for _, v := range in { - if v != exclude { - out = append(out, v) - } - } - // If we filtered it out, zero out the last item to prevent - // the GC from seeing it. - if len(in) != len(out) { - in[len(in)-1] = nil - } - return out -} - -// noDialClientConnPool is an implementation of http2.ClientConnPool -// which never dials. We let the HTTP/1.1 client dial and use its TLS -// connection instead. -type noDialClientConnPool struct{ *clientConnPool } - -func (p noDialClientConnPool) GetClientConn(req *http.Request, addr string) (*ClientConn, error) { - return p.getClientConn(req, addr, noDialOnMiss) -} diff --git a/vendor/golang.org/x/net/http2/configure_transport.go b/vendor/golang.org/x/net/http2/configure_transport.go deleted file mode 100644 index 4f720f530b..0000000000 --- a/vendor/golang.org/x/net/http2/configure_transport.go +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build go1.6 - -package http2 - -import ( - "crypto/tls" - "fmt" - "net/http" -) - -func configureTransport(t1 *http.Transport) (*Transport, error) { - connPool := new(clientConnPool) - t2 := &Transport{ - ConnPool: noDialClientConnPool{connPool}, - t1: t1, - } - connPool.t = t2 - if err := registerHTTPSProtocol(t1, noDialH2RoundTripper{t2}); err != nil { - return nil, err - } - if t1.TLSClientConfig == nil { - t1.TLSClientConfig = new(tls.Config) - } - if !strSliceContains(t1.TLSClientConfig.NextProtos, "h2") { - t1.TLSClientConfig.NextProtos = append([]string{"h2"}, t1.TLSClientConfig.NextProtos...) - } - if !strSliceContains(t1.TLSClientConfig.NextProtos, "http/1.1") { - t1.TLSClientConfig.NextProtos = append(t1.TLSClientConfig.NextProtos, "http/1.1") - } - upgradeFn := func(authority string, c *tls.Conn) http.RoundTripper { - addr := authorityAddr("https", authority) - if used, err := connPool.addConnIfNeeded(addr, t2, c); err != nil { - go c.Close() - return erringRoundTripper{err} - } else if !used { - // Turns out we don't need this c. - // For example, two goroutines made requests to the same host - // at the same time, both kicking off TCP dials. (since protocol - // was unknown) - go c.Close() - } - return t2 - } - if m := t1.TLSNextProto; len(m) == 0 { - t1.TLSNextProto = map[string]func(string, *tls.Conn) http.RoundTripper{ - "h2": upgradeFn, - } - } else { - m["h2"] = upgradeFn - } - return t2, nil -} - -// registerHTTPSProtocol calls Transport.RegisterProtocol but -// convering panics into errors. -func registerHTTPSProtocol(t *http.Transport, rt http.RoundTripper) (err error) { - defer func() { - if e := recover(); e != nil { - err = fmt.Errorf("%v", e) - } - }() - t.RegisterProtocol("https", rt) - return nil -} - -// noDialH2RoundTripper is a RoundTripper which only tries to complete the request -// if there's already has a cached connection to the host. -type noDialH2RoundTripper struct{ t *Transport } - -func (rt noDialH2RoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { - res, err := rt.t.RoundTrip(req) - if err == ErrNoCachedConn { - return nil, http.ErrSkipAltProtocol - } - return res, err -} diff --git a/vendor/golang.org/x/net/http2/errors.go b/vendor/golang.org/x/net/http2/errors.go deleted file mode 100644 index 20fd7626a5..0000000000 --- a/vendor/golang.org/x/net/http2/errors.go +++ /dev/null @@ -1,130 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package http2 - -import ( - "errors" - "fmt" -) - -// An ErrCode is an unsigned 32-bit error code as defined in the HTTP/2 spec. 
-type ErrCode uint32 - -const ( - ErrCodeNo ErrCode = 0x0 - ErrCodeProtocol ErrCode = 0x1 - ErrCodeInternal ErrCode = 0x2 - ErrCodeFlowControl ErrCode = 0x3 - ErrCodeSettingsTimeout ErrCode = 0x4 - ErrCodeStreamClosed ErrCode = 0x5 - ErrCodeFrameSize ErrCode = 0x6 - ErrCodeRefusedStream ErrCode = 0x7 - ErrCodeCancel ErrCode = 0x8 - ErrCodeCompression ErrCode = 0x9 - ErrCodeConnect ErrCode = 0xa - ErrCodeEnhanceYourCalm ErrCode = 0xb - ErrCodeInadequateSecurity ErrCode = 0xc - ErrCodeHTTP11Required ErrCode = 0xd -) - -var errCodeName = map[ErrCode]string{ - ErrCodeNo: "NO_ERROR", - ErrCodeProtocol: "PROTOCOL_ERROR", - ErrCodeInternal: "INTERNAL_ERROR", - ErrCodeFlowControl: "FLOW_CONTROL_ERROR", - ErrCodeSettingsTimeout: "SETTINGS_TIMEOUT", - ErrCodeStreamClosed: "STREAM_CLOSED", - ErrCodeFrameSize: "FRAME_SIZE_ERROR", - ErrCodeRefusedStream: "REFUSED_STREAM", - ErrCodeCancel: "CANCEL", - ErrCodeCompression: "COMPRESSION_ERROR", - ErrCodeConnect: "CONNECT_ERROR", - ErrCodeEnhanceYourCalm: "ENHANCE_YOUR_CALM", - ErrCodeInadequateSecurity: "INADEQUATE_SECURITY", - ErrCodeHTTP11Required: "HTTP_1_1_REQUIRED", -} - -func (e ErrCode) String() string { - if s, ok := errCodeName[e]; ok { - return s - } - return fmt.Sprintf("unknown error code 0x%x", uint32(e)) -} - -// ConnectionError is an error that results in the termination of the -// entire connection. -type ConnectionError ErrCode - -func (e ConnectionError) Error() string { return fmt.Sprintf("connection error: %s", ErrCode(e)) } - -// StreamError is an error that only affects one stream within an -// HTTP/2 connection. -type StreamError struct { - StreamID uint32 - Code ErrCode - Cause error // optional additional detail -} - -func streamError(id uint32, code ErrCode) StreamError { - return StreamError{StreamID: id, Code: code} -} - -func (e StreamError) Error() string { - if e.Cause != nil { - return fmt.Sprintf("stream error: stream ID %d; %v; %v", e.StreamID, e.Code, e.Cause) - } - return fmt.Sprintf("stream error: stream ID %d; %v", e.StreamID, e.Code) -} - -// 6.9.1 The Flow Control Window -// "If a sender receives a WINDOW_UPDATE that causes a flow control -// window to exceed this maximum it MUST terminate either the stream -// or the connection, as appropriate. For streams, [...]; for the -// connection, a GOAWAY frame with a FLOW_CONTROL_ERROR code." -type goAwayFlowError struct{} - -func (goAwayFlowError) Error() string { return "connection exceeded flow control window size" } - -// connErrorReason wraps a ConnectionError with an informative error about why it occurs. - -// Errors of this type are only returned by the frame parser functions -// and converted into ConnectionError(ErrCodeProtocol). 
-type connError struct { - Code ErrCode - Reason string -} - -func (e connError) Error() string { - return fmt.Sprintf("http2: connection error: %v: %v", e.Code, e.Reason) -} - -type pseudoHeaderError string - -func (e pseudoHeaderError) Error() string { - return fmt.Sprintf("invalid pseudo-header %q", string(e)) -} - -type duplicatePseudoHeaderError string - -func (e duplicatePseudoHeaderError) Error() string { - return fmt.Sprintf("duplicate pseudo-header %q", string(e)) -} - -type headerFieldNameError string - -func (e headerFieldNameError) Error() string { - return fmt.Sprintf("invalid header field name %q", string(e)) -} - -type headerFieldValueError string - -func (e headerFieldValueError) Error() string { - return fmt.Sprintf("invalid header field value %q", string(e)) -} - -var ( - errMixPseudoHeaderTypes = errors.New("mix of request and response pseudo headers") - errPseudoAfterRegular = errors.New("pseudo header field after regular") -) diff --git a/vendor/golang.org/x/net/http2/fixed_buffer.go b/vendor/golang.org/x/net/http2/fixed_buffer.go deleted file mode 100644 index 47da0f0bf7..0000000000 --- a/vendor/golang.org/x/net/http2/fixed_buffer.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package http2 - -import ( - "errors" -) - -// fixedBuffer is an io.ReadWriter backed by a fixed size buffer. -// It never allocates, but moves old data as new data is written. -type fixedBuffer struct { - buf []byte - r, w int -} - -var ( - errReadEmpty = errors.New("read from empty fixedBuffer") - errWriteFull = errors.New("write on full fixedBuffer") -) - -// Read copies bytes from the buffer into p. -// It is an error to read when no data is available. -func (b *fixedBuffer) Read(p []byte) (n int, err error) { - if b.r == b.w { - return 0, errReadEmpty - } - n = copy(p, b.buf[b.r:b.w]) - b.r += n - if b.r == b.w { - b.r = 0 - b.w = 0 - } - return n, nil -} - -// Len returns the number of bytes of the unread portion of the buffer. -func (b *fixedBuffer) Len() int { - return b.w - b.r -} - -// Write copies bytes from p into the buffer. -// It is an error to write more data than the buffer can hold. -func (b *fixedBuffer) Write(p []byte) (n int, err error) { - // Slide existing data to beginning. - if b.r > 0 && len(p) > len(b.buf)-b.w { - copy(b.buf, b.buf[b.r:b.w]) - b.w -= b.r - b.r = 0 - } - - // Write new data. - n = copy(b.buf[b.w:], p) - b.w += n - if n < len(p) { - err = errWriteFull - } - return n, err -} diff --git a/vendor/golang.org/x/net/http2/flow.go b/vendor/golang.org/x/net/http2/flow.go deleted file mode 100644 index 957de25420..0000000000 --- a/vendor/golang.org/x/net/http2/flow.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Flow control - -package http2 - -// flow is the flow control window's size. -type flow struct { - // n is the number of DATA bytes we're allowed to send. - // A flow is kept both on a conn and a per-stream. - n int32 - - // conn points to the shared connection-level flow that is - // shared by all streams on that conn. It is nil for the flow - // that's on the conn directly. 
- conn *flow -} - -func (f *flow) setConnFlow(cf *flow) { f.conn = cf } - -func (f *flow) available() int32 { - n := f.n - if f.conn != nil && f.conn.n < n { - n = f.conn.n - } - return n -} - -func (f *flow) take(n int32) { - if n > f.available() { - panic("internal error: took too much") - } - f.n -= n - if f.conn != nil { - f.conn.n -= n - } -} - -// add adds n bytes (positive or negative) to the flow control window. -// It returns false if the sum would exceed 2^31-1. -func (f *flow) add(n int32) bool { - remain := (1<<31 - 1) - f.n - if n > remain { - return false - } - f.n += n - return true -} diff --git a/vendor/golang.org/x/net/http2/frame.go b/vendor/golang.org/x/net/http2/frame.go deleted file mode 100644 index c9b09bb675..0000000000 --- a/vendor/golang.org/x/net/http2/frame.go +++ /dev/null @@ -1,1539 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package http2 - -import ( - "bytes" - "encoding/binary" - "errors" - "fmt" - "io" - "log" - "strings" - "sync" - - "golang.org/x/net/http2/hpack" - "golang.org/x/net/lex/httplex" -) - -const frameHeaderLen = 9 - -var padZeros = make([]byte, 255) // zeros for padding - -// A FrameType is a registered frame type as defined in -// http://http2.github.io/http2-spec/#rfc.section.11.2 -type FrameType uint8 - -const ( - FrameData FrameType = 0x0 - FrameHeaders FrameType = 0x1 - FramePriority FrameType = 0x2 - FrameRSTStream FrameType = 0x3 - FrameSettings FrameType = 0x4 - FramePushPromise FrameType = 0x5 - FramePing FrameType = 0x6 - FrameGoAway FrameType = 0x7 - FrameWindowUpdate FrameType = 0x8 - FrameContinuation FrameType = 0x9 -) - -var frameName = map[FrameType]string{ - FrameData: "DATA", - FrameHeaders: "HEADERS", - FramePriority: "PRIORITY", - FrameRSTStream: "RST_STREAM", - FrameSettings: "SETTINGS", - FramePushPromise: "PUSH_PROMISE", - FramePing: "PING", - FrameGoAway: "GOAWAY", - FrameWindowUpdate: "WINDOW_UPDATE", - FrameContinuation: "CONTINUATION", -} - -func (t FrameType) String() string { - if s, ok := frameName[t]; ok { - return s - } - return fmt.Sprintf("UNKNOWN_FRAME_TYPE_%d", uint8(t)) -} - -// Flags is a bitmask of HTTP/2 flags. -// The meaning of flags varies depending on the frame type. -type Flags uint8 - -// Has reports whether f contains all (0 or more) flags in v. -func (f Flags) Has(v Flags) bool { - return (f & v) == v -} - -// Frame-specific FrameHeader flag bits. 
-const ( - // Data Frame - FlagDataEndStream Flags = 0x1 - FlagDataPadded Flags = 0x8 - - // Headers Frame - FlagHeadersEndStream Flags = 0x1 - FlagHeadersEndHeaders Flags = 0x4 - FlagHeadersPadded Flags = 0x8 - FlagHeadersPriority Flags = 0x20 - - // Settings Frame - FlagSettingsAck Flags = 0x1 - - // Ping Frame - FlagPingAck Flags = 0x1 - - // Continuation Frame - FlagContinuationEndHeaders Flags = 0x4 - - FlagPushPromiseEndHeaders Flags = 0x4 - FlagPushPromisePadded Flags = 0x8 -) - -var flagName = map[FrameType]map[Flags]string{ - FrameData: { - FlagDataEndStream: "END_STREAM", - FlagDataPadded: "PADDED", - }, - FrameHeaders: { - FlagHeadersEndStream: "END_STREAM", - FlagHeadersEndHeaders: "END_HEADERS", - FlagHeadersPadded: "PADDED", - FlagHeadersPriority: "PRIORITY", - }, - FrameSettings: { - FlagSettingsAck: "ACK", - }, - FramePing: { - FlagPingAck: "ACK", - }, - FrameContinuation: { - FlagContinuationEndHeaders: "END_HEADERS", - }, - FramePushPromise: { - FlagPushPromiseEndHeaders: "END_HEADERS", - FlagPushPromisePadded: "PADDED", - }, -} - -// a frameParser parses a frame given its FrameHeader and payload -// bytes. The length of payload will always equal fh.Length (which -// might be 0). -type frameParser func(fh FrameHeader, payload []byte) (Frame, error) - -var frameParsers = map[FrameType]frameParser{ - FrameData: parseDataFrame, - FrameHeaders: parseHeadersFrame, - FramePriority: parsePriorityFrame, - FrameRSTStream: parseRSTStreamFrame, - FrameSettings: parseSettingsFrame, - FramePushPromise: parsePushPromise, - FramePing: parsePingFrame, - FrameGoAway: parseGoAwayFrame, - FrameWindowUpdate: parseWindowUpdateFrame, - FrameContinuation: parseContinuationFrame, -} - -func typeFrameParser(t FrameType) frameParser { - if f := frameParsers[t]; f != nil { - return f - } - return parseUnknownFrame -} - -// A FrameHeader is the 9 byte header of all HTTP/2 frames. -// -// See http://http2.github.io/http2-spec/#FrameHeader -type FrameHeader struct { - valid bool // caller can access []byte fields in the Frame - - // Type is the 1 byte frame type. There are ten standard frame - // types, but extension frame types may be written by WriteRawFrame - // and will be returned by ReadFrame (as UnknownFrame). - Type FrameType - - // Flags are the 1 byte of 8 potential bit flags per frame. - // They are specific to the frame type. - Flags Flags - - // Length is the length of the frame, not including the 9 byte header. - // The maximum size is one byte less than 16MB (uint24), but only - // frames up to 16KB are allowed without peer agreement. - Length uint32 - - // StreamID is which stream this frame is for. Certain frames - // are not stream-specific, in which case this field is 0. - StreamID uint32 -} - -// Header returns h. It exists so FrameHeaders can be embedded in other -// specific frame types and implement the Frame interface. 
-func (h FrameHeader) Header() FrameHeader { return h } - -func (h FrameHeader) String() string { - var buf bytes.Buffer - buf.WriteString("[FrameHeader ") - h.writeDebug(&buf) - buf.WriteByte(']') - return buf.String() -} - -func (h FrameHeader) writeDebug(buf *bytes.Buffer) { - buf.WriteString(h.Type.String()) - if h.Flags != 0 { - buf.WriteString(" flags=") - set := 0 - for i := uint8(0); i < 8; i++ { - if h.Flags&(1< 1 { - buf.WriteByte('|') - } - name := flagName[h.Type][Flags(1<>24), - byte(streamID>>16), - byte(streamID>>8), - byte(streamID)) -} - -func (f *Framer) endWrite() error { - // Now that we know the final size, fill in the FrameHeader in - // the space previously reserved for it. Abuse append. - length := len(f.wbuf) - frameHeaderLen - if length >= (1 << 24) { - return ErrFrameTooLarge - } - _ = append(f.wbuf[:0], - byte(length>>16), - byte(length>>8), - byte(length)) - if logFrameWrites { - f.logWrite() - } - - n, err := f.w.Write(f.wbuf) - if err == nil && n != len(f.wbuf) { - err = io.ErrShortWrite - } - return err -} - -func (f *Framer) logWrite() { - if f.debugFramer == nil { - f.debugFramerBuf = new(bytes.Buffer) - f.debugFramer = NewFramer(nil, f.debugFramerBuf) - f.debugFramer.logReads = false // we log it ourselves, saying "wrote" below - // Let us read anything, even if we accidentally wrote it - // in the wrong order: - f.debugFramer.AllowIllegalReads = true - } - f.debugFramerBuf.Write(f.wbuf) - fr, err := f.debugFramer.ReadFrame() - if err != nil { - log.Printf("http2: Framer %p: failed to decode just-written frame", f) - return - } - log.Printf("http2: Framer %p: wrote %v", f, summarizeFrame(fr)) -} - -func (f *Framer) writeByte(v byte) { f.wbuf = append(f.wbuf, v) } -func (f *Framer) writeBytes(v []byte) { f.wbuf = append(f.wbuf, v...) } -func (f *Framer) writeUint16(v uint16) { f.wbuf = append(f.wbuf, byte(v>>8), byte(v)) } -func (f *Framer) writeUint32(v uint32) { - f.wbuf = append(f.wbuf, byte(v>>24), byte(v>>16), byte(v>>8), byte(v)) -} - -const ( - minMaxFrameSize = 1 << 14 - maxFrameSize = 1<<24 - 1 -) - -// NewFramer returns a Framer that writes frames to w and reads them from r. -func NewFramer(w io.Writer, r io.Reader) *Framer { - fr := &Framer{ - w: w, - r: r, - logReads: logFrameReads, - } - fr.getReadBuf = func(size uint32) []byte { - if cap(fr.readBuf) >= int(size) { - return fr.readBuf[:size] - } - fr.readBuf = make([]byte, size) - return fr.readBuf - } - fr.SetMaxReadFrameSize(maxFrameSize) - return fr -} - -// SetMaxReadFrameSize sets the maximum size of a frame -// that will be read by a subsequent call to ReadFrame. -// It is the caller's responsibility to advertise this -// limit with a SETTINGS frame. -func (fr *Framer) SetMaxReadFrameSize(v uint32) { - if v > maxFrameSize { - v = maxFrameSize - } - fr.maxReadSize = v -} - -// ErrorDetail returns a more detailed error of the last error -// returned by Framer.ReadFrame. For instance, if ReadFrame -// returns a StreamError with code PROTOCOL_ERROR, ErrorDetail -// will say exactly what was invalid. ErrorDetail is not guaranteed -// to return a non-nil value and like the rest of the http2 package, -// its return value is not protected by an API compatibility promise. -// ErrorDetail is reset after the next call to ReadFrame. -func (fr *Framer) ErrorDetail() error { - return fr.errDetail -} - -// ErrFrameTooLarge is returned from Framer.ReadFrame when the peer -// sends a frame that is larger than declared with SetMaxReadFrameSize. 
-var ErrFrameTooLarge = errors.New("http2: frame too large") - -// terminalReadFrameError reports whether err is an unrecoverable -// error from ReadFrame and no other frames should be read. -func terminalReadFrameError(err error) bool { - if _, ok := err.(StreamError); ok { - return false - } - return err != nil -} - -// ReadFrame reads a single frame. The returned Frame is only valid -// until the next call to ReadFrame. -// -// If the frame is larger than previously set with SetMaxReadFrameSize, the -// returned error is ErrFrameTooLarge. Other errors may be of type -// ConnectionError, StreamError, or anything else from the underlying -// reader. -func (fr *Framer) ReadFrame() (Frame, error) { - fr.errDetail = nil - if fr.lastFrame != nil { - fr.lastFrame.invalidate() - } - fh, err := readFrameHeader(fr.headerBuf[:], fr.r) - if err != nil { - return nil, err - } - if fh.Length > fr.maxReadSize { - return nil, ErrFrameTooLarge - } - payload := fr.getReadBuf(fh.Length) - if _, err := io.ReadFull(fr.r, payload); err != nil { - return nil, err - } - f, err := typeFrameParser(fh.Type)(fh, payload) - if err != nil { - if ce, ok := err.(connError); ok { - return nil, fr.connError(ce.Code, ce.Reason) - } - return nil, err - } - if err := fr.checkFrameOrder(f); err != nil { - return nil, err - } - if fr.logReads { - log.Printf("http2: Framer %p: read %v", fr, summarizeFrame(f)) - } - if fh.Type == FrameHeaders && fr.ReadMetaHeaders != nil { - return fr.readMetaFrame(f.(*HeadersFrame)) - } - return f, nil -} - -// connError returns ConnectionError(code) but first -// stashes away a public reason to the caller can optionally relay it -// to the peer before hanging up on them. This might help others debug -// their implementations. -func (fr *Framer) connError(code ErrCode, reason string) error { - fr.errDetail = errors.New(reason) - return ConnectionError(code) -} - -// checkFrameOrder reports an error if f is an invalid frame to return -// next from ReadFrame. Mostly it checks whether HEADERS and -// CONTINUATION frames are contiguous. -func (fr *Framer) checkFrameOrder(f Frame) error { - last := fr.lastFrame - fr.lastFrame = f - if fr.AllowIllegalReads { - return nil - } - - fh := f.Header() - if fr.lastHeaderStream != 0 { - if fh.Type != FrameContinuation { - return fr.connError(ErrCodeProtocol, - fmt.Sprintf("got %s for stream %d; expected CONTINUATION following %s for stream %d", - fh.Type, fh.StreamID, - last.Header().Type, fr.lastHeaderStream)) - } - if fh.StreamID != fr.lastHeaderStream { - return fr.connError(ErrCodeProtocol, - fmt.Sprintf("got CONTINUATION for stream %d; expected stream %d", - fh.StreamID, fr.lastHeaderStream)) - } - } else if fh.Type == FrameContinuation { - return fr.connError(ErrCodeProtocol, fmt.Sprintf("unexpected CONTINUATION for stream %d", fh.StreamID)) - } - - switch fh.Type { - case FrameHeaders, FrameContinuation: - if fh.Flags.Has(FlagHeadersEndHeaders) { - fr.lastHeaderStream = 0 - } else { - fr.lastHeaderStream = fh.StreamID - } - } - - return nil -} - -// A DataFrame conveys arbitrary, variable-length sequences of octets -// associated with a stream. -// See http://http2.github.io/http2-spec/#rfc.section.6.1 -type DataFrame struct { - FrameHeader - data []byte -} - -func (f *DataFrame) StreamEnded() bool { - return f.FrameHeader.Flags.Has(FlagDataEndStream) -} - -// Data returns the frame's data octets, not including any padding -// size byte or padding suffix bytes. -// The caller must not retain the returned memory past the next -// call to ReadFrame. 
-func (f *DataFrame) Data() []byte { - f.checkValid() - return f.data -} - -func parseDataFrame(fh FrameHeader, payload []byte) (Frame, error) { - if fh.StreamID == 0 { - // DATA frames MUST be associated with a stream. If a - // DATA frame is received whose stream identifier - // field is 0x0, the recipient MUST respond with a - // connection error (Section 5.4.1) of type - // PROTOCOL_ERROR. - return nil, connError{ErrCodeProtocol, "DATA frame with stream ID 0"} - } - f := &DataFrame{ - FrameHeader: fh, - } - var padSize byte - if fh.Flags.Has(FlagDataPadded) { - var err error - payload, padSize, err = readByte(payload) - if err != nil { - return nil, err - } - } - if int(padSize) > len(payload) { - // If the length of the padding is greater than the - // length of the frame payload, the recipient MUST - // treat this as a connection error. - // Filed: https://github.com/http2/http2-spec/issues/610 - return nil, connError{ErrCodeProtocol, "pad size larger than data payload"} - } - f.data = payload[:len(payload)-int(padSize)] - return f, nil -} - -var ( - errStreamID = errors.New("invalid stream ID") - errDepStreamID = errors.New("invalid dependent stream ID") - errPadLength = errors.New("pad length too large") -) - -func validStreamIDOrZero(streamID uint32) bool { - return streamID&(1<<31) == 0 -} - -func validStreamID(streamID uint32) bool { - return streamID != 0 && streamID&(1<<31) == 0 -} - -// WriteData writes a DATA frame. -// -// It will perform exactly one Write to the underlying Writer. -// It is the caller's responsibility not to violate the maximum frame size -// and to not call other Write methods concurrently. -func (f *Framer) WriteData(streamID uint32, endStream bool, data []byte) error { - return f.WriteDataPadded(streamID, endStream, data, nil) -} - -// WriteData writes a DATA frame with optional padding. -// -// If pad is nil, the padding bit is not sent. -// The length of pad must not exceed 255 bytes. -// -// It will perform exactly one Write to the underlying Writer. -// It is the caller's responsibility not to violate the maximum frame size -// and to not call other Write methods concurrently. -func (f *Framer) WriteDataPadded(streamID uint32, endStream bool, data, pad []byte) error { - if !validStreamID(streamID) && !f.AllowIllegalWrites { - return errStreamID - } - if len(pad) > 255 { - return errPadLength - } - var flags Flags - if endStream { - flags |= FlagDataEndStream - } - if pad != nil { - flags |= FlagDataPadded - } - f.startWrite(FrameData, flags, streamID) - if pad != nil { - f.wbuf = append(f.wbuf, byte(len(pad))) - } - f.wbuf = append(f.wbuf, data...) - f.wbuf = append(f.wbuf, pad...) - return f.endWrite() -} - -// A SettingsFrame conveys configuration parameters that affect how -// endpoints communicate, such as preferences and constraints on peer -// behavior. -// -// See http://http2.github.io/http2-spec/#SETTINGS -type SettingsFrame struct { - FrameHeader - p []byte -} - -func parseSettingsFrame(fh FrameHeader, p []byte) (Frame, error) { - if fh.Flags.Has(FlagSettingsAck) && fh.Length > 0 { - // When this (ACK 0x1) bit is set, the payload of the - // SETTINGS frame MUST be empty. Receipt of a - // SETTINGS frame with the ACK flag set and a length - // field value other than 0 MUST be treated as a - // connection error (Section 5.4.1) of type - // FRAME_SIZE_ERROR. - return nil, ConnectionError(ErrCodeFrameSize) - } - if fh.StreamID != 0 { - // SETTINGS frames always apply to a connection, - // never a single stream. 
The stream identifier for a - // SETTINGS frame MUST be zero (0x0). If an endpoint - // receives a SETTINGS frame whose stream identifier - // field is anything other than 0x0, the endpoint MUST - // respond with a connection error (Section 5.4.1) of - // type PROTOCOL_ERROR. - return nil, ConnectionError(ErrCodeProtocol) - } - if len(p)%6 != 0 { - // Expecting even number of 6 byte settings. - return nil, ConnectionError(ErrCodeFrameSize) - } - f := &SettingsFrame{FrameHeader: fh, p: p} - if v, ok := f.Value(SettingInitialWindowSize); ok && v > (1<<31)-1 { - // Values above the maximum flow control window size of 2^31 - 1 MUST - // be treated as a connection error (Section 5.4.1) of type - // FLOW_CONTROL_ERROR. - return nil, ConnectionError(ErrCodeFlowControl) - } - return f, nil -} - -func (f *SettingsFrame) IsAck() bool { - return f.FrameHeader.Flags.Has(FlagSettingsAck) -} - -func (f *SettingsFrame) Value(s SettingID) (v uint32, ok bool) { - f.checkValid() - buf := f.p - for len(buf) > 0 { - settingID := SettingID(binary.BigEndian.Uint16(buf[:2])) - if settingID == s { - return binary.BigEndian.Uint32(buf[2:6]), true - } - buf = buf[6:] - } - return 0, false -} - -// ForeachSetting runs fn for each setting. -// It stops and returns the first error. -func (f *SettingsFrame) ForeachSetting(fn func(Setting) error) error { - f.checkValid() - buf := f.p - for len(buf) > 0 { - if err := fn(Setting{ - SettingID(binary.BigEndian.Uint16(buf[:2])), - binary.BigEndian.Uint32(buf[2:6]), - }); err != nil { - return err - } - buf = buf[6:] - } - return nil -} - -// WriteSettings writes a SETTINGS frame with zero or more settings -// specified and the ACK bit not set. -// -// It will perform exactly one Write to the underlying Writer. -// It is the caller's responsibility to not call other Write methods concurrently. -func (f *Framer) WriteSettings(settings ...Setting) error { - f.startWrite(FrameSettings, 0, 0) - for _, s := range settings { - f.writeUint16(uint16(s.ID)) - f.writeUint32(s.Val) - } - return f.endWrite() -} - -// WriteSettings writes an empty SETTINGS frame with the ACK bit set. -// -// It will perform exactly one Write to the underlying Writer. -// It is the caller's responsibility to not call other Write methods concurrently. -func (f *Framer) WriteSettingsAck() error { - f.startWrite(FrameSettings, FlagSettingsAck, 0) - return f.endWrite() -} - -// A PingFrame is a mechanism for measuring a minimal round trip time -// from the sender, as well as determining whether an idle connection -// is still functional. -// See http://http2.github.io/http2-spec/#rfc.section.6.7 -type PingFrame struct { - FrameHeader - Data [8]byte -} - -func (f *PingFrame) IsAck() bool { return f.Flags.Has(FlagPingAck) } - -func parsePingFrame(fh FrameHeader, payload []byte) (Frame, error) { - if len(payload) != 8 { - return nil, ConnectionError(ErrCodeFrameSize) - } - if fh.StreamID != 0 { - return nil, ConnectionError(ErrCodeProtocol) - } - f := &PingFrame{FrameHeader: fh} - copy(f.Data[:], payload) - return f, nil -} - -func (f *Framer) WritePing(ack bool, data [8]byte) error { - var flags Flags - if ack { - flags = FlagPingAck - } - f.startWrite(FramePing, flags, 0) - f.writeBytes(data[:]) - return f.endWrite() -} - -// A GoAwayFrame informs the remote peer to stop creating streams on this connection. 
-// See http://http2.github.io/http2-spec/#rfc.section.6.8 -type GoAwayFrame struct { - FrameHeader - LastStreamID uint32 - ErrCode ErrCode - debugData []byte -} - -// DebugData returns any debug data in the GOAWAY frame. Its contents -// are not defined. -// The caller must not retain the returned memory past the next -// call to ReadFrame. -func (f *GoAwayFrame) DebugData() []byte { - f.checkValid() - return f.debugData -} - -func parseGoAwayFrame(fh FrameHeader, p []byte) (Frame, error) { - if fh.StreamID != 0 { - return nil, ConnectionError(ErrCodeProtocol) - } - if len(p) < 8 { - return nil, ConnectionError(ErrCodeFrameSize) - } - return &GoAwayFrame{ - FrameHeader: fh, - LastStreamID: binary.BigEndian.Uint32(p[:4]) & (1<<31 - 1), - ErrCode: ErrCode(binary.BigEndian.Uint32(p[4:8])), - debugData: p[8:], - }, nil -} - -func (f *Framer) WriteGoAway(maxStreamID uint32, code ErrCode, debugData []byte) error { - f.startWrite(FrameGoAway, 0, 0) - f.writeUint32(maxStreamID & (1<<31 - 1)) - f.writeUint32(uint32(code)) - f.writeBytes(debugData) - return f.endWrite() -} - -// An UnknownFrame is the frame type returned when the frame type is unknown -// or no specific frame type parser exists. -type UnknownFrame struct { - FrameHeader - p []byte -} - -// Payload returns the frame's payload (after the header). It is not -// valid to call this method after a subsequent call to -// Framer.ReadFrame, nor is it valid to retain the returned slice. -// The memory is owned by the Framer and is invalidated when the next -// frame is read. -func (f *UnknownFrame) Payload() []byte { - f.checkValid() - return f.p -} - -func parseUnknownFrame(fh FrameHeader, p []byte) (Frame, error) { - return &UnknownFrame{fh, p}, nil -} - -// A WindowUpdateFrame is used to implement flow control. -// See http://http2.github.io/http2-spec/#rfc.section.6.9 -type WindowUpdateFrame struct { - FrameHeader - Increment uint32 // never read with high bit set -} - -func parseWindowUpdateFrame(fh FrameHeader, p []byte) (Frame, error) { - if len(p) != 4 { - return nil, ConnectionError(ErrCodeFrameSize) - } - inc := binary.BigEndian.Uint32(p[:4]) & 0x7fffffff // mask off high reserved bit - if inc == 0 { - // A receiver MUST treat the receipt of a - // WINDOW_UPDATE frame with an flow control window - // increment of 0 as a stream error (Section 5.4.2) of - // type PROTOCOL_ERROR; errors on the connection flow - // control window MUST be treated as a connection - // error (Section 5.4.1). - if fh.StreamID == 0 { - return nil, ConnectionError(ErrCodeProtocol) - } - return nil, streamError(fh.StreamID, ErrCodeProtocol) - } - return &WindowUpdateFrame{ - FrameHeader: fh, - Increment: inc, - }, nil -} - -// WriteWindowUpdate writes a WINDOW_UPDATE frame. -// The increment value must be between 1 and 2,147,483,647, inclusive. -// If the Stream ID is zero, the window update applies to the -// connection as a whole. -func (f *Framer) WriteWindowUpdate(streamID, incr uint32) error { - // "The legal range for the increment to the flow control window is 1 to 2^31-1 (2,147,483,647) octets." - if (incr < 1 || incr > 2147483647) && !f.AllowIllegalWrites { - return errors.New("illegal window increment value") - } - f.startWrite(FrameWindowUpdate, 0, streamID) - f.writeUint32(incr) - return f.endWrite() -} - -// A HeadersFrame is used to open a stream and additionally carries a -// header block fragment. -type HeadersFrame struct { - FrameHeader - - // Priority is set if FlagHeadersPriority is set in the FrameHeader. 
- Priority PriorityParam - - headerFragBuf []byte // not owned -} - -func (f *HeadersFrame) HeaderBlockFragment() []byte { - f.checkValid() - return f.headerFragBuf -} - -func (f *HeadersFrame) HeadersEnded() bool { - return f.FrameHeader.Flags.Has(FlagHeadersEndHeaders) -} - -func (f *HeadersFrame) StreamEnded() bool { - return f.FrameHeader.Flags.Has(FlagHeadersEndStream) -} - -func (f *HeadersFrame) HasPriority() bool { - return f.FrameHeader.Flags.Has(FlagHeadersPriority) -} - -func parseHeadersFrame(fh FrameHeader, p []byte) (_ Frame, err error) { - hf := &HeadersFrame{ - FrameHeader: fh, - } - if fh.StreamID == 0 { - // HEADERS frames MUST be associated with a stream. If a HEADERS frame - // is received whose stream identifier field is 0x0, the recipient MUST - // respond with a connection error (Section 5.4.1) of type - // PROTOCOL_ERROR. - return nil, connError{ErrCodeProtocol, "HEADERS frame with stream ID 0"} - } - var padLength uint8 - if fh.Flags.Has(FlagHeadersPadded) { - if p, padLength, err = readByte(p); err != nil { - return - } - } - if fh.Flags.Has(FlagHeadersPriority) { - var v uint32 - p, v, err = readUint32(p) - if err != nil { - return nil, err - } - hf.Priority.StreamDep = v & 0x7fffffff - hf.Priority.Exclusive = (v != hf.Priority.StreamDep) // high bit was set - p, hf.Priority.Weight, err = readByte(p) - if err != nil { - return nil, err - } - } - if len(p)-int(padLength) <= 0 { - return nil, streamError(fh.StreamID, ErrCodeProtocol) - } - hf.headerFragBuf = p[:len(p)-int(padLength)] - return hf, nil -} - -// HeadersFrameParam are the parameters for writing a HEADERS frame. -type HeadersFrameParam struct { - // StreamID is the required Stream ID to initiate. - StreamID uint32 - // BlockFragment is part (or all) of a Header Block. - BlockFragment []byte - - // EndStream indicates that the header block is the last that - // the endpoint will send for the identified stream. Setting - // this flag causes the stream to enter one of "half closed" - // states. - EndStream bool - - // EndHeaders indicates that this frame contains an entire - // header block and is not followed by any - // CONTINUATION frames. - EndHeaders bool - - // PadLength is the optional number of bytes of zeros to add - // to this frame. - PadLength uint8 - - // Priority, if non-zero, includes stream priority information - // in the HEADER frame. - Priority PriorityParam -} - -// WriteHeaders writes a single HEADERS frame. -// -// This is a low-level header writing method. Encoding headers and -// splitting them into any necessary CONTINUATION frames is handled -// elsewhere. -// -// It will perform exactly one Write to the underlying Writer. -// It is the caller's responsibility to not call other Write methods concurrently. -func (f *Framer) WriteHeaders(p HeadersFrameParam) error { - if !validStreamID(p.StreamID) && !f.AllowIllegalWrites { - return errStreamID - } - var flags Flags - if p.PadLength != 0 { - flags |= FlagHeadersPadded - } - if p.EndStream { - flags |= FlagHeadersEndStream - } - if p.EndHeaders { - flags |= FlagHeadersEndHeaders - } - if !p.Priority.IsZero() { - flags |= FlagHeadersPriority - } - f.startWrite(FrameHeaders, flags, p.StreamID) - if p.PadLength != 0 { - f.writeByte(p.PadLength) - } - if !p.Priority.IsZero() { - v := p.Priority.StreamDep - if !validStreamIDOrZero(v) && !f.AllowIllegalWrites { - return errDepStreamID - } - if p.Priority.Exclusive { - v |= 1 << 31 - } - f.writeUint32(v) - f.writeByte(p.Priority.Weight) - } - f.wbuf = append(f.wbuf, p.BlockFragment...) 
- f.wbuf = append(f.wbuf, padZeros[:p.PadLength]...) - return f.endWrite() -} - -// A PriorityFrame specifies the sender-advised priority of a stream. -// See http://http2.github.io/http2-spec/#rfc.section.6.3 -type PriorityFrame struct { - FrameHeader - PriorityParam -} - -// PriorityParam are the stream prioritzation parameters. -type PriorityParam struct { - // StreamDep is a 31-bit stream identifier for the - // stream that this stream depends on. Zero means no - // dependency. - StreamDep uint32 - - // Exclusive is whether the dependency is exclusive. - Exclusive bool - - // Weight is the stream's zero-indexed weight. It should be - // set together with StreamDep, or neither should be set. Per - // the spec, "Add one to the value to obtain a weight between - // 1 and 256." - Weight uint8 -} - -func (p PriorityParam) IsZero() bool { - return p == PriorityParam{} -} - -func parsePriorityFrame(fh FrameHeader, payload []byte) (Frame, error) { - if fh.StreamID == 0 { - return nil, connError{ErrCodeProtocol, "PRIORITY frame with stream ID 0"} - } - if len(payload) != 5 { - return nil, connError{ErrCodeFrameSize, fmt.Sprintf("PRIORITY frame payload size was %d; want 5", len(payload))} - } - v := binary.BigEndian.Uint32(payload[:4]) - streamID := v & 0x7fffffff // mask off high bit - return &PriorityFrame{ - FrameHeader: fh, - PriorityParam: PriorityParam{ - Weight: payload[4], - StreamDep: streamID, - Exclusive: streamID != v, // was high bit set? - }, - }, nil -} - -// WritePriority writes a PRIORITY frame. -// -// It will perform exactly one Write to the underlying Writer. -// It is the caller's responsibility to not call other Write methods concurrently. -func (f *Framer) WritePriority(streamID uint32, p PriorityParam) error { - if !validStreamID(streamID) && !f.AllowIllegalWrites { - return errStreamID - } - if !validStreamIDOrZero(p.StreamDep) { - return errDepStreamID - } - f.startWrite(FramePriority, 0, streamID) - v := p.StreamDep - if p.Exclusive { - v |= 1 << 31 - } - f.writeUint32(v) - f.writeByte(p.Weight) - return f.endWrite() -} - -// A RSTStreamFrame allows for abnormal termination of a stream. -// See http://http2.github.io/http2-spec/#rfc.section.6.4 -type RSTStreamFrame struct { - FrameHeader - ErrCode ErrCode -} - -func parseRSTStreamFrame(fh FrameHeader, p []byte) (Frame, error) { - if len(p) != 4 { - return nil, ConnectionError(ErrCodeFrameSize) - } - if fh.StreamID == 0 { - return nil, ConnectionError(ErrCodeProtocol) - } - return &RSTStreamFrame{fh, ErrCode(binary.BigEndian.Uint32(p[:4]))}, nil -} - -// WriteRSTStream writes a RST_STREAM frame. -// -// It will perform exactly one Write to the underlying Writer. -// It is the caller's responsibility to not call other Write methods concurrently. -func (f *Framer) WriteRSTStream(streamID uint32, code ErrCode) error { - if !validStreamID(streamID) && !f.AllowIllegalWrites { - return errStreamID - } - f.startWrite(FrameRSTStream, 0, streamID) - f.writeUint32(uint32(code)) - return f.endWrite() -} - -// A ContinuationFrame is used to continue a sequence of header block fragments. 
-// See http://http2.github.io/http2-spec/#rfc.section.6.10 -type ContinuationFrame struct { - FrameHeader - headerFragBuf []byte -} - -func parseContinuationFrame(fh FrameHeader, p []byte) (Frame, error) { - if fh.StreamID == 0 { - return nil, connError{ErrCodeProtocol, "CONTINUATION frame with stream ID 0"} - } - return &ContinuationFrame{fh, p}, nil -} - -func (f *ContinuationFrame) HeaderBlockFragment() []byte { - f.checkValid() - return f.headerFragBuf -} - -func (f *ContinuationFrame) HeadersEnded() bool { - return f.FrameHeader.Flags.Has(FlagContinuationEndHeaders) -} - -// WriteContinuation writes a CONTINUATION frame. -// -// It will perform exactly one Write to the underlying Writer. -// It is the caller's responsibility to not call other Write methods concurrently. -func (f *Framer) WriteContinuation(streamID uint32, endHeaders bool, headerBlockFragment []byte) error { - if !validStreamID(streamID) && !f.AllowIllegalWrites { - return errStreamID - } - var flags Flags - if endHeaders { - flags |= FlagContinuationEndHeaders - } - f.startWrite(FrameContinuation, flags, streamID) - f.wbuf = append(f.wbuf, headerBlockFragment...) - return f.endWrite() -} - -// A PushPromiseFrame is used to initiate a server stream. -// See http://http2.github.io/http2-spec/#rfc.section.6.6 -type PushPromiseFrame struct { - FrameHeader - PromiseID uint32 - headerFragBuf []byte // not owned -} - -func (f *PushPromiseFrame) HeaderBlockFragment() []byte { - f.checkValid() - return f.headerFragBuf -} - -func (f *PushPromiseFrame) HeadersEnded() bool { - return f.FrameHeader.Flags.Has(FlagPushPromiseEndHeaders) -} - -func parsePushPromise(fh FrameHeader, p []byte) (_ Frame, err error) { - pp := &PushPromiseFrame{ - FrameHeader: fh, - } - if pp.StreamID == 0 { - // PUSH_PROMISE frames MUST be associated with an existing, - // peer-initiated stream. The stream identifier of a - // PUSH_PROMISE frame indicates the stream it is associated - // with. If the stream identifier field specifies the value - // 0x0, a recipient MUST respond with a connection error - // (Section 5.4.1) of type PROTOCOL_ERROR. - return nil, ConnectionError(ErrCodeProtocol) - } - // The PUSH_PROMISE frame includes optional padding. - // Padding fields and flags are identical to those defined for DATA frames - var padLength uint8 - if fh.Flags.Has(FlagPushPromisePadded) { - if p, padLength, err = readByte(p); err != nil { - return - } - } - - p, pp.PromiseID, err = readUint32(p) - if err != nil { - return - } - pp.PromiseID = pp.PromiseID & (1<<31 - 1) - - if int(padLength) > len(p) { - // like the DATA frame, error out if padding is longer than the body. - return nil, ConnectionError(ErrCodeProtocol) - } - pp.headerFragBuf = p[:len(p)-int(padLength)] - return pp, nil -} - -// PushPromiseParam are the parameters for writing a PUSH_PROMISE frame. -type PushPromiseParam struct { - // StreamID is the required Stream ID to initiate. - StreamID uint32 - - // PromiseID is the required Stream ID which this - // Push Promises - PromiseID uint32 - - // BlockFragment is part (or all) of a Header Block. - BlockFragment []byte - - // EndHeaders indicates that this frame contains an entire - // header block and is not followed by any - // CONTINUATION frames. - EndHeaders bool - - // PadLength is the optional number of bytes of zeros to add - // to this frame. - PadLength uint8 -} - -// WritePushPromise writes a single PushPromise Frame. -// -// As with Header Frames, This is the low level call for writing -// individual frames. 
Continuation frames are handled elsewhere. -// -// It will perform exactly one Write to the underlying Writer. -// It is the caller's responsibility to not call other Write methods concurrently. -func (f *Framer) WritePushPromise(p PushPromiseParam) error { - if !validStreamID(p.StreamID) && !f.AllowIllegalWrites { - return errStreamID - } - var flags Flags - if p.PadLength != 0 { - flags |= FlagPushPromisePadded - } - if p.EndHeaders { - flags |= FlagPushPromiseEndHeaders - } - f.startWrite(FramePushPromise, flags, p.StreamID) - if p.PadLength != 0 { - f.writeByte(p.PadLength) - } - if !validStreamID(p.PromiseID) && !f.AllowIllegalWrites { - return errStreamID - } - f.writeUint32(p.PromiseID) - f.wbuf = append(f.wbuf, p.BlockFragment...) - f.wbuf = append(f.wbuf, padZeros[:p.PadLength]...) - return f.endWrite() -} - -// WriteRawFrame writes a raw frame. This can be used to write -// extension frames unknown to this package. -func (f *Framer) WriteRawFrame(t FrameType, flags Flags, streamID uint32, payload []byte) error { - f.startWrite(t, flags, streamID) - f.writeBytes(payload) - return f.endWrite() -} - -func readByte(p []byte) (remain []byte, b byte, err error) { - if len(p) == 0 { - return nil, 0, io.ErrUnexpectedEOF - } - return p[1:], p[0], nil -} - -func readUint32(p []byte) (remain []byte, v uint32, err error) { - if len(p) < 4 { - return nil, 0, io.ErrUnexpectedEOF - } - return p[4:], binary.BigEndian.Uint32(p[:4]), nil -} - -type streamEnder interface { - StreamEnded() bool -} - -type headersEnder interface { - HeadersEnded() bool -} - -type headersOrContinuation interface { - headersEnder - HeaderBlockFragment() []byte -} - -// A MetaHeadersFrame is the representation of one HEADERS frame and -// zero or more contiguous CONTINUATION frames and the decoding of -// their HPACK-encoded contents. -// -// This type of frame does not appear on the wire and is only returned -// by the Framer when Framer.ReadMetaHeaders is set. -type MetaHeadersFrame struct { - *HeadersFrame - - // Fields are the fields contained in the HEADERS and - // CONTINUATION frames. The underlying slice is owned by the - // Framer and must not be retained after the next call to - // ReadFrame. - // - // Fields are guaranteed to be in the correct http2 order and - // not have unknown pseudo header fields or invalid header - // field names or values. Required pseudo header fields may be - // missing, however. Use the MetaHeadersFrame.Pseudo accessor - // method access pseudo headers. - Fields []hpack.HeaderField - - // Truncated is whether the max header list size limit was hit - // and Fields is incomplete. The hpack decoder state is still - // valid, however. - Truncated bool -} - -// PseudoValue returns the given pseudo header field's value. -// The provided pseudo field should not contain the leading colon. -func (mh *MetaHeadersFrame) PseudoValue(pseudo string) string { - for _, hf := range mh.Fields { - if !hf.IsPseudo() { - return "" - } - if hf.Name[1:] == pseudo { - return hf.Value - } - } - return "" -} - -// RegularFields returns the regular (non-pseudo) header fields of mh. -// The caller does not own the returned slice. -func (mh *MetaHeadersFrame) RegularFields() []hpack.HeaderField { - for i, hf := range mh.Fields { - if !hf.IsPseudo() { - return mh.Fields[i:] - } - } - return nil -} - -// PseudoFields returns the pseudo header fields of mh. -// The caller does not own the returned slice. 
-func (mh *MetaHeadersFrame) PseudoFields() []hpack.HeaderField { - for i, hf := range mh.Fields { - if !hf.IsPseudo() { - return mh.Fields[:i] - } - } - return mh.Fields -} - -func (mh *MetaHeadersFrame) checkPseudos() error { - var isRequest, isResponse bool - pf := mh.PseudoFields() - for i, hf := range pf { - switch hf.Name { - case ":method", ":path", ":scheme", ":authority": - isRequest = true - case ":status": - isResponse = true - default: - return pseudoHeaderError(hf.Name) - } - // Check for duplicates. - // This would be a bad algorithm, but N is 4. - // And this doesn't allocate. - for _, hf2 := range pf[:i] { - if hf.Name == hf2.Name { - return duplicatePseudoHeaderError(hf.Name) - } - } - } - if isRequest && isResponse { - return errMixPseudoHeaderTypes - } - return nil -} - -func (fr *Framer) maxHeaderStringLen() int { - v := fr.maxHeaderListSize() - if uint32(int(v)) == v { - return int(v) - } - // They had a crazy big number for MaxHeaderBytes anyway, - // so give them unlimited header lengths: - return 0 -} - -// readMetaFrame returns 0 or more CONTINUATION frames from fr and -// merge them into into the provided hf and returns a MetaHeadersFrame -// with the decoded hpack values. -func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) { - if fr.AllowIllegalReads { - return nil, errors.New("illegal use of AllowIllegalReads with ReadMetaHeaders") - } - mh := &MetaHeadersFrame{ - HeadersFrame: hf, - } - var remainSize = fr.maxHeaderListSize() - var sawRegular bool - - var invalid error // pseudo header field errors - hdec := fr.ReadMetaHeaders - hdec.SetEmitEnabled(true) - hdec.SetMaxStringLength(fr.maxHeaderStringLen()) - hdec.SetEmitFunc(func(hf hpack.HeaderField) { - if VerboseLogs && logFrameReads { - log.Printf("http2: decoded hpack field %+v", hf) - } - if !httplex.ValidHeaderFieldValue(hf.Value) { - invalid = headerFieldValueError(hf.Value) - } - isPseudo := strings.HasPrefix(hf.Name, ":") - if isPseudo { - if sawRegular { - invalid = errPseudoAfterRegular - } - } else { - sawRegular = true - if !validWireHeaderFieldName(hf.Name) { - invalid = headerFieldNameError(hf.Name) - } - } - - if invalid != nil { - hdec.SetEmitEnabled(false) - return - } - - size := hf.Size() - if size > remainSize { - hdec.SetEmitEnabled(false) - mh.Truncated = true - return - } - remainSize -= size - - mh.Fields = append(mh.Fields, hf) - }) - // Lose reference to MetaHeadersFrame: - defer hdec.SetEmitFunc(func(hf hpack.HeaderField) {}) - - var hc headersOrContinuation = hf - for { - frag := hc.HeaderBlockFragment() - if _, err := hdec.Write(frag); err != nil { - return nil, ConnectionError(ErrCodeCompression) - } - - if hc.HeadersEnded() { - break - } - if f, err := fr.ReadFrame(); err != nil { - return nil, err - } else { - hc = f.(*ContinuationFrame) // guaranteed by checkFrameOrder - } - } - - mh.HeadersFrame.headerFragBuf = nil - mh.HeadersFrame.invalidate() - - if err := hdec.Close(); err != nil { - return nil, ConnectionError(ErrCodeCompression) - } - if invalid != nil { - fr.errDetail = invalid - if VerboseLogs { - log.Printf("http2: invalid header: %v", invalid) - } - return nil, StreamError{mh.StreamID, ErrCodeProtocol, invalid} - } - if err := mh.checkPseudos(); err != nil { - fr.errDetail = err - if VerboseLogs { - log.Printf("http2: invalid pseudo headers: %v", err) - } - return nil, StreamError{mh.StreamID, ErrCodeProtocol, err} - } - return mh, nil -} - -func summarizeFrame(f Frame) string { - var buf bytes.Buffer - f.Header().writeDebug(&buf) - switch f 
:= f.(type) { - case *SettingsFrame: - n := 0 - f.ForeachSetting(func(s Setting) error { - n++ - if n == 1 { - buf.WriteString(", settings:") - } - fmt.Fprintf(&buf, " %v=%v,", s.ID, s.Val) - return nil - }) - if n > 0 { - buf.Truncate(buf.Len() - 1) // remove trailing comma - } - case *DataFrame: - data := f.Data() - const max = 256 - if len(data) > max { - data = data[:max] - } - fmt.Fprintf(&buf, " data=%q", data) - if len(f.Data()) > max { - fmt.Fprintf(&buf, " (%d bytes omitted)", len(f.Data())-max) - } - case *WindowUpdateFrame: - if f.StreamID == 0 { - buf.WriteString(" (conn)") - } - fmt.Fprintf(&buf, " incr=%v", f.Increment) - case *PingFrame: - fmt.Fprintf(&buf, " ping=%q", f.Data[:]) - case *GoAwayFrame: - fmt.Fprintf(&buf, " LastStreamID=%v ErrCode=%v Debug=%q", - f.LastStreamID, f.ErrCode, f.debugData) - case *RSTStreamFrame: - fmt.Fprintf(&buf, " ErrCode=%v", f.ErrCode) - } - return buf.String() -} diff --git a/vendor/golang.org/x/net/http2/go16.go b/vendor/golang.org/x/net/http2/go16.go deleted file mode 100644 index 2b72855f55..0000000000 --- a/vendor/golang.org/x/net/http2/go16.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build go1.6 - -package http2 - -import ( - "crypto/tls" - "net/http" - "time" -) - -func transportExpectContinueTimeout(t1 *http.Transport) time.Duration { - return t1.ExpectContinueTimeout -} - -// isBadCipher reports whether the cipher is blacklisted by the HTTP/2 spec. -func isBadCipher(cipher uint16) bool { - switch cipher { - case tls.TLS_RSA_WITH_RC4_128_SHA, - tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA, - tls.TLS_RSA_WITH_AES_128_CBC_SHA, - tls.TLS_RSA_WITH_AES_256_CBC_SHA, - tls.TLS_RSA_WITH_AES_128_GCM_SHA256, - tls.TLS_RSA_WITH_AES_256_GCM_SHA384, - tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, - tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, - tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, - tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA, - tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, - tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, - tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA: - // Reject cipher suites from Appendix A. - // "This list includes those cipher suites that do not - // offer an ephemeral key exchange and those that are - // based on the TLS null, stream or block cipher type" - return true - default: - return false - } -} diff --git a/vendor/golang.org/x/net/http2/go17.go b/vendor/golang.org/x/net/http2/go17.go deleted file mode 100644 index 730319dd5a..0000000000 --- a/vendor/golang.org/x/net/http2/go17.go +++ /dev/null @@ -1,94 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
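isBadCipher above rejects the cipher suites that RFC 7540 Appendix A blacklists for HTTP/2. A small standalone sketch of using such a check to vet a cipher-suite list (the helper below is illustrative, not the vendored function, and covers only two example suites):

package main

import (
	"crypto/tls"
	"fmt"
)

// badCipher mirrors the shape of isBadCipher above, for two sample suites only.
func badCipher(id uint16) bool {
	switch id {
	case tls.TLS_RSA_WITH_RC4_128_SHA, tls.TLS_RSA_WITH_AES_128_CBC_SHA:
		return true
	default:
		return false
	}
}

func main() {
	suites := []uint16{
		tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, // acceptable for HTTP/2
		tls.TLS_RSA_WITH_AES_128_CBC_SHA,          // blacklisted by Appendix A
	}
	for _, s := range suites {
		fmt.Printf("0x%04x blacklisted=%v\n", s, badCipher(s))
	}
}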
- -// +build go1.7 - -package http2 - -import ( - "context" - "net" - "net/http" - "net/http/httptrace" - "time" -) - -type contextContext interface { - context.Context -} - -func serverConnBaseContext(c net.Conn, opts *ServeConnOpts) (ctx contextContext, cancel func()) { - ctx, cancel = context.WithCancel(context.Background()) - ctx = context.WithValue(ctx, http.LocalAddrContextKey, c.LocalAddr()) - if hs := opts.baseConfig(); hs != nil { - ctx = context.WithValue(ctx, http.ServerContextKey, hs) - } - return -} - -func contextWithCancel(ctx contextContext) (_ contextContext, cancel func()) { - return context.WithCancel(ctx) -} - -func requestWithContext(req *http.Request, ctx contextContext) *http.Request { - return req.WithContext(ctx) -} - -type clientTrace httptrace.ClientTrace - -func reqContext(r *http.Request) context.Context { return r.Context() } - -func setResponseUncompressed(res *http.Response) { res.Uncompressed = true } - -func traceGotConn(req *http.Request, cc *ClientConn) { - trace := httptrace.ContextClientTrace(req.Context()) - if trace == nil || trace.GotConn == nil { - return - } - ci := httptrace.GotConnInfo{Conn: cc.tconn} - cc.mu.Lock() - ci.Reused = cc.nextStreamID > 1 - ci.WasIdle = len(cc.streams) == 0 && ci.Reused - if ci.WasIdle && !cc.lastActive.IsZero() { - ci.IdleTime = time.Now().Sub(cc.lastActive) - } - cc.mu.Unlock() - - trace.GotConn(ci) -} - -func traceWroteHeaders(trace *clientTrace) { - if trace != nil && trace.WroteHeaders != nil { - trace.WroteHeaders() - } -} - -func traceGot100Continue(trace *clientTrace) { - if trace != nil && trace.Got100Continue != nil { - trace.Got100Continue() - } -} - -func traceWait100Continue(trace *clientTrace) { - if trace != nil && trace.Wait100Continue != nil { - trace.Wait100Continue() - } -} - -func traceWroteRequest(trace *clientTrace, err error) { - if trace != nil && trace.WroteRequest != nil { - trace.WroteRequest(httptrace.WroteRequestInfo{Err: err}) - } -} - -func traceFirstResponseByte(trace *clientTrace) { - if trace != nil && trace.GotFirstResponseByte != nil { - trace.GotFirstResponseByte() - } -} - -func requestTrace(req *http.Request) *clientTrace { - trace := httptrace.ContextClientTrace(req.Context()) - return (*clientTrace)(trace) -} diff --git a/vendor/golang.org/x/net/http2/gotrack.go b/vendor/golang.org/x/net/http2/gotrack.go deleted file mode 100644 index 9933c9f8c7..0000000000 --- a/vendor/golang.org/x/net/http2/gotrack.go +++ /dev/null @@ -1,170 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Defensive debug-only utility to track that functions run on the -// goroutine that they're supposed to. 
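The go17.go helpers above adapt net/http/httptrace hooks (GotConn, GotFirstResponseByte, and so on) for the Transport. A minimal client-side sketch of wiring those hooks up; the URL is a placeholder and the example is illustrative, not part of the vendored code:

package main

import (
	"fmt"
	"net/http"
	"net/http/httptrace"
)

func main() {
	req, err := http.NewRequest("GET", "https://example.com/", nil) // placeholder URL
	if err != nil {
		panic(err)
	}
	trace := &httptrace.ClientTrace{
		GotConn: func(info httptrace.GotConnInfo) {
			fmt.Println("got conn; reused:", info.Reused, "was idle:", info.WasIdle)
		},
		GotFirstResponseByte: func() { fmt.Println("first response byte") },
	}
	// Attach the trace to the request context; the transport fires the hooks.
	req = req.WithContext(httptrace.WithClientTrace(req.Context(), trace))
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	resp.Body.Close()
	fmt.Println("proto:", resp.Proto)
}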
- -package http2 - -import ( - "bytes" - "errors" - "fmt" - "os" - "runtime" - "strconv" - "sync" -) - -var DebugGoroutines = os.Getenv("DEBUG_HTTP2_GOROUTINES") == "1" - -type goroutineLock uint64 - -func newGoroutineLock() goroutineLock { - if !DebugGoroutines { - return 0 - } - return goroutineLock(curGoroutineID()) -} - -func (g goroutineLock) check() { - if !DebugGoroutines { - return - } - if curGoroutineID() != uint64(g) { - panic("running on the wrong goroutine") - } -} - -func (g goroutineLock) checkNotOn() { - if !DebugGoroutines { - return - } - if curGoroutineID() == uint64(g) { - panic("running on the wrong goroutine") - } -} - -var goroutineSpace = []byte("goroutine ") - -func curGoroutineID() uint64 { - bp := littleBuf.Get().(*[]byte) - defer littleBuf.Put(bp) - b := *bp - b = b[:runtime.Stack(b, false)] - // Parse the 4707 out of "goroutine 4707 [" - b = bytes.TrimPrefix(b, goroutineSpace) - i := bytes.IndexByte(b, ' ') - if i < 0 { - panic(fmt.Sprintf("No space found in %q", b)) - } - b = b[:i] - n, err := parseUintBytes(b, 10, 64) - if err != nil { - panic(fmt.Sprintf("Failed to parse goroutine ID out of %q: %v", b, err)) - } - return n -} - -var littleBuf = sync.Pool{ - New: func() interface{} { - buf := make([]byte, 64) - return &buf - }, -} - -// parseUintBytes is like strconv.ParseUint, but using a []byte. -func parseUintBytes(s []byte, base int, bitSize int) (n uint64, err error) { - var cutoff, maxVal uint64 - - if bitSize == 0 { - bitSize = int(strconv.IntSize) - } - - s0 := s - switch { - case len(s) < 1: - err = strconv.ErrSyntax - goto Error - - case 2 <= base && base <= 36: - // valid base; nothing to do - - case base == 0: - // Look for octal, hex prefix. - switch { - case s[0] == '0' && len(s) > 1 && (s[1] == 'x' || s[1] == 'X'): - base = 16 - s = s[2:] - if len(s) < 1 { - err = strconv.ErrSyntax - goto Error - } - case s[0] == '0': - base = 8 - default: - base = 10 - } - - default: - err = errors.New("invalid base " + strconv.Itoa(base)) - goto Error - } - - n = 0 - cutoff = cutoff64(base) - maxVal = 1<= base { - n = 0 - err = strconv.ErrSyntax - goto Error - } - - if n >= cutoff { - // n*base overflows - n = 1<<64 - 1 - err = strconv.ErrRange - goto Error - } - n *= uint64(base) - - n1 := n + uint64(v) - if n1 < n || n1 > maxVal { - // n+v overflows - n = 1<<64 - 1 - err = strconv.ErrRange - goto Error - } - n = n1 - } - - return n, nil - -Error: - return n, &strconv.NumError{Func: "ParseUint", Num: string(s0), Err: err} -} - -// Return the first number n such that n*base >= 1<<64. -func cutoff64(base int) uint64 { - if base < 2 { - return 0 - } - return (1<<64-1)/uint64(base) + 1 -} diff --git a/vendor/golang.org/x/net/http2/headermap.go b/vendor/golang.org/x/net/http2/headermap.go deleted file mode 100644 index c2805f6ac4..0000000000 --- a/vendor/golang.org/x/net/http2/headermap.go +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
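curGoroutineID above recovers the goroutine number by parsing the first line that runtime.Stack writes ("goroutine 4707 [running]:"). A compact standalone sketch of that trick, simplified to omit the pooled buffer and the custom uint parser:

package main

import (
	"bytes"
	"fmt"
	"runtime"
	"strconv"
)

// goroutineID parses the current goroutine's ID out of runtime.Stack output.
func goroutineID() uint64 {
	buf := make([]byte, 64)
	buf = buf[:runtime.Stack(buf, false)]
	buf = bytes.TrimPrefix(buf, []byte("goroutine "))
	if i := bytes.IndexByte(buf, ' '); i >= 0 {
		buf = buf[:i]
	}
	id, _ := strconv.ParseUint(string(buf), 10, 64)
	return id
}

func main() {
	fmt.Println("running on goroutine", goroutineID())
}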
- -package http2 - -import ( - "net/http" - "strings" -) - -var ( - commonLowerHeader = map[string]string{} // Go-Canonical-Case -> lower-case - commonCanonHeader = map[string]string{} // lower-case -> Go-Canonical-Case -) - -func init() { - for _, v := range []string{ - "accept", - "accept-charset", - "accept-encoding", - "accept-language", - "accept-ranges", - "age", - "access-control-allow-origin", - "allow", - "authorization", - "cache-control", - "content-disposition", - "content-encoding", - "content-language", - "content-length", - "content-location", - "content-range", - "content-type", - "cookie", - "date", - "etag", - "expect", - "expires", - "from", - "host", - "if-match", - "if-modified-since", - "if-none-match", - "if-unmodified-since", - "last-modified", - "link", - "location", - "max-forwards", - "proxy-authenticate", - "proxy-authorization", - "range", - "referer", - "refresh", - "retry-after", - "server", - "set-cookie", - "strict-transport-security", - "trailer", - "transfer-encoding", - "user-agent", - "vary", - "via", - "www-authenticate", - } { - chk := http.CanonicalHeaderKey(v) - commonLowerHeader[chk] = v - commonCanonHeader[v] = chk - } -} - -func lowerHeader(v string) string { - if s, ok := commonLowerHeader[v]; ok { - return s - } - return strings.ToLower(v) -} diff --git a/vendor/golang.org/x/net/http2/hpack/encode.go b/vendor/golang.org/x/net/http2/hpack/encode.go deleted file mode 100644 index f9bb033984..0000000000 --- a/vendor/golang.org/x/net/http2/hpack/encode.go +++ /dev/null @@ -1,251 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package hpack - -import ( - "io" -) - -const ( - uint32Max = ^uint32(0) - initialHeaderTableSize = 4096 -) - -type Encoder struct { - dynTab dynamicTable - // minSize is the minimum table size set by - // SetMaxDynamicTableSize after the previous Header Table Size - // Update. - minSize uint32 - // maxSizeLimit is the maximum table size this encoder - // supports. This will protect the encoder from too large - // size. - maxSizeLimit uint32 - // tableSizeUpdate indicates whether "Header Table Size - // Update" is required. - tableSizeUpdate bool - w io.Writer - buf []byte -} - -// NewEncoder returns a new Encoder which performs HPACK encoding. An -// encoded data is written to w. -func NewEncoder(w io.Writer) *Encoder { - e := &Encoder{ - minSize: uint32Max, - maxSizeLimit: initialHeaderTableSize, - tableSizeUpdate: false, - w: w, - } - e.dynTab.setMaxSize(initialHeaderTableSize) - return e -} - -// WriteField encodes f into a single Write to e's underlying Writer. -// This function may also produce bytes for "Header Table Size Update" -// if necessary. If produced, it is done before encoding f. 
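A short usage sketch of the Encoder API described above; the field names and values are made up for illustration:

package main

import (
	"bytes"
	"fmt"

	"golang.org/x/net/http2/hpack"
)

func main() {
	var buf bytes.Buffer
	enc := hpack.NewEncoder(&buf)
	// Each WriteField appends one HPACK-encoded field to buf.
	enc.WriteField(hpack.HeaderField{Name: ":method", Value: "GET"})
	enc.WriteField(hpack.HeaderField{Name: ":path", Value: "/"})
	// Sensitive fields are emitted as "Never Indexed" literals.
	enc.WriteField(hpack.HeaderField{Name: "authorization", Value: "secret", Sensitive: true})
	fmt.Printf("%d header bytes: %x\n", buf.Len(), buf.Bytes())
}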
-func (e *Encoder) WriteField(f HeaderField) error { - e.buf = e.buf[:0] - - if e.tableSizeUpdate { - e.tableSizeUpdate = false - if e.minSize < e.dynTab.maxSize { - e.buf = appendTableSize(e.buf, e.minSize) - } - e.minSize = uint32Max - e.buf = appendTableSize(e.buf, e.dynTab.maxSize) - } - - idx, nameValueMatch := e.searchTable(f) - if nameValueMatch { - e.buf = appendIndexed(e.buf, idx) - } else { - indexing := e.shouldIndex(f) - if indexing { - e.dynTab.add(f) - } - - if idx == 0 { - e.buf = appendNewName(e.buf, f, indexing) - } else { - e.buf = appendIndexedName(e.buf, f, idx, indexing) - } - } - n, err := e.w.Write(e.buf) - if err == nil && n != len(e.buf) { - err = io.ErrShortWrite - } - return err -} - -// searchTable searches f in both stable and dynamic header tables. -// The static header table is searched first. Only when there is no -// exact match for both name and value, the dynamic header table is -// then searched. If there is no match, i is 0. If both name and value -// match, i is the matched index and nameValueMatch becomes true. If -// only name matches, i points to that index and nameValueMatch -// becomes false. -func (e *Encoder) searchTable(f HeaderField) (i uint64, nameValueMatch bool) { - for idx, hf := range staticTable { - if !constantTimeStringCompare(hf.Name, f.Name) { - continue - } - if i == 0 { - i = uint64(idx + 1) - } - if f.Sensitive { - continue - } - if !constantTimeStringCompare(hf.Value, f.Value) { - continue - } - i = uint64(idx + 1) - nameValueMatch = true - return - } - - j, nameValueMatch := e.dynTab.search(f) - if nameValueMatch || (i == 0 && j != 0) { - i = j + uint64(len(staticTable)) - } - return -} - -// SetMaxDynamicTableSize changes the dynamic header table size to v. -// The actual size is bounded by the value passed to -// SetMaxDynamicTableSizeLimit. -func (e *Encoder) SetMaxDynamicTableSize(v uint32) { - if v > e.maxSizeLimit { - v = e.maxSizeLimit - } - if v < e.minSize { - e.minSize = v - } - e.tableSizeUpdate = true - e.dynTab.setMaxSize(v) -} - -// SetMaxDynamicTableSizeLimit changes the maximum value that can be -// specified in SetMaxDynamicTableSize to v. By default, it is set to -// 4096, which is the same size of the default dynamic header table -// size described in HPACK specification. If the current maximum -// dynamic header table size is strictly greater than v, "Header Table -// Size Update" will be done in the next WriteField call and the -// maximum dynamic header table size is truncated to v. -func (e *Encoder) SetMaxDynamicTableSizeLimit(v uint32) { - e.maxSizeLimit = v - if e.dynTab.maxSize > v { - e.tableSizeUpdate = true - e.dynTab.setMaxSize(v) - } -} - -// shouldIndex reports whether f should be indexed. -func (e *Encoder) shouldIndex(f HeaderField) bool { - return !f.Sensitive && f.Size() <= e.dynTab.maxSize -} - -// appendIndexed appends index i, as encoded in "Indexed Header Field" -// representation, to dst and returns the extended buffer. -func appendIndexed(dst []byte, i uint64) []byte { - first := len(dst) - dst = appendVarInt(dst, 7, i) - dst[first] |= 0x80 - return dst -} - -// appendNewName appends f, as encoded in one of "Literal Header field -// - New Name" representation variants, to dst and returns the -// extended buffer. -// -// If f.Sensitive is true, "Never Indexed" representation is used. If -// f.Sensitive is false and indexing is true, "Inremental Indexing" -// representation is used. 
-func appendNewName(dst []byte, f HeaderField, indexing bool) []byte { - dst = append(dst, encodeTypeByte(indexing, f.Sensitive)) - dst = appendHpackString(dst, f.Name) - return appendHpackString(dst, f.Value) -} - -// appendIndexedName appends f and index i referring indexed name -// entry, as encoded in one of "Literal Header field - Indexed Name" -// representation variants, to dst and returns the extended buffer. -// -// If f.Sensitive is true, "Never Indexed" representation is used. If -// f.Sensitive is false and indexing is true, "Incremental Indexing" -// representation is used. -func appendIndexedName(dst []byte, f HeaderField, i uint64, indexing bool) []byte { - first := len(dst) - var n byte - if indexing { - n = 6 - } else { - n = 4 - } - dst = appendVarInt(dst, n, i) - dst[first] |= encodeTypeByte(indexing, f.Sensitive) - return appendHpackString(dst, f.Value) -} - -// appendTableSize appends v, as encoded in "Header Table Size Update" -// representation, to dst and returns the extended buffer. -func appendTableSize(dst []byte, v uint32) []byte { - first := len(dst) - dst = appendVarInt(dst, 5, uint64(v)) - dst[first] |= 0x20 - return dst -} - -// appendVarInt appends i, as encoded in variable integer form using n -// bit prefix, to dst and returns the extended buffer. -// -// See -// http://http2.github.io/http2-spec/compression.html#integer.representation -func appendVarInt(dst []byte, n byte, i uint64) []byte { - k := uint64((1 << n) - 1) - if i < k { - return append(dst, byte(i)) - } - dst = append(dst, byte(k)) - i -= k - for ; i >= 128; i >>= 7 { - dst = append(dst, byte(0x80|(i&0x7f))) - } - return append(dst, byte(i)) -} - -// appendHpackString appends s, as encoded in "String Literal" -// representation, to dst and returns the the extended buffer. -// -// s will be encoded in Huffman codes only when it produces strictly -// shorter byte string. -func appendHpackString(dst []byte, s string) []byte { - huffmanLength := HuffmanEncodeLength(s) - if huffmanLength < uint64(len(s)) { - first := len(dst) - dst = appendVarInt(dst, 7, huffmanLength) - dst = AppendHuffmanString(dst, s) - dst[first] |= 0x80 - } else { - dst = appendVarInt(dst, 7, uint64(len(s))) - dst = append(dst, s...) - } - return dst -} - -// encodeTypeByte returns type byte. If sensitive is true, type byte -// for "Never Indexed" representation is returned. If sensitive is -// false and indexing is true, type byte for "Incremental Indexing" -// representation is returned. Otherwise, type byte for "Without -// Indexing" is returned. -func encodeTypeByte(indexing, sensitive bool) byte { - if sensitive { - return 0x10 - } - if indexing { - return 0x40 - } - return 0 -} diff --git a/vendor/golang.org/x/net/http2/hpack/hpack.go b/vendor/golang.org/x/net/http2/hpack/hpack.go deleted file mode 100644 index 8aa197ad67..0000000000 --- a/vendor/golang.org/x/net/http2/hpack/hpack.go +++ /dev/null @@ -1,542 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package hpack implements HPACK, a compression format for -// efficiently representing HTTP header fields in the context of HTTP/2. -// -// See http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-09 -package hpack - -import ( - "bytes" - "errors" - "fmt" -) - -// A DecodingError is something the spec defines as a decoding error. 
-type DecodingError struct { - Err error -} - -func (de DecodingError) Error() string { - return fmt.Sprintf("decoding error: %v", de.Err) -} - -// An InvalidIndexError is returned when an encoder references a table -// entry before the static table or after the end of the dynamic table. -type InvalidIndexError int - -func (e InvalidIndexError) Error() string { - return fmt.Sprintf("invalid indexed representation index %d", int(e)) -} - -// A HeaderField is a name-value pair. Both the name and value are -// treated as opaque sequences of octets. -type HeaderField struct { - Name, Value string - - // Sensitive means that this header field should never be - // indexed. - Sensitive bool -} - -// IsPseudo reports whether the header field is an http2 pseudo header. -// That is, it reports whether it starts with a colon. -// It is not otherwise guaranteed to be a valid pseudo header field, -// though. -func (hf HeaderField) IsPseudo() bool { - return len(hf.Name) != 0 && hf.Name[0] == ':' -} - -func (hf HeaderField) String() string { - var suffix string - if hf.Sensitive { - suffix = " (sensitive)" - } - return fmt.Sprintf("header field %q = %q%s", hf.Name, hf.Value, suffix) -} - -// Size returns the size of an entry per RFC 7540 section 5.2. -func (hf HeaderField) Size() uint32 { - // http://http2.github.io/http2-spec/compression.html#rfc.section.4.1 - // "The size of the dynamic table is the sum of the size of - // its entries. The size of an entry is the sum of its name's - // length in octets (as defined in Section 5.2), its value's - // length in octets (see Section 5.2), plus 32. The size of - // an entry is calculated using the length of the name and - // value without any Huffman encoding applied." - - // This can overflow if somebody makes a large HeaderField - // Name and/or Value by hand, but we don't care, because that - // won't happen on the wire because the encoding doesn't allow - // it. - return uint32(len(hf.Name) + len(hf.Value) + 32) -} - -// A Decoder is the decoding context for incremental processing of -// header blocks. -type Decoder struct { - dynTab dynamicTable - emit func(f HeaderField) - - emitEnabled bool // whether calls to emit are enabled - maxStrLen int // 0 means unlimited - - // buf is the unparsed buffer. It's only written to - // saveBuf if it was truncated in the middle of a header - // block. Because it's usually not owned, we can only - // process it under Write. - buf []byte // not owned; only valid during Write - - // saveBuf is previous data passed to Write which we weren't able - // to fully parse before. Unlike buf, we own this data. - saveBuf bytes.Buffer -} - -// NewDecoder returns a new decoder with the provided maximum dynamic -// table size. The emitFunc will be called for each valid field -// parsed, in the same goroutine as calls to Write, before Write returns. -func NewDecoder(maxDynamicTableSize uint32, emitFunc func(f HeaderField)) *Decoder { - d := &Decoder{ - emit: emitFunc, - emitEnabled: true, - } - d.dynTab.allowedMaxSize = maxDynamicTableSize - d.dynTab.setMaxSize(maxDynamicTableSize) - return d -} - -// ErrStringLength is returned by Decoder.Write when the max string length -// (as configured by Decoder.SetMaxStringLength) would be violated. -var ErrStringLength = errors.New("hpack: string too long") - -// SetMaxStringLength sets the maximum size of a HeaderField name or -// value string. If a string exceeds this length (even after any -// decompression), Write will return ErrStringLength. 
-// A value of 0 means unlimited and is the default from NewDecoder. -func (d *Decoder) SetMaxStringLength(n int) { - d.maxStrLen = n -} - -// SetEmitFunc changes the callback used when new header fields -// are decoded. -// It must be non-nil. It does not affect EmitEnabled. -func (d *Decoder) SetEmitFunc(emitFunc func(f HeaderField)) { - d.emit = emitFunc -} - -// SetEmitEnabled controls whether the emitFunc provided to NewDecoder -// should be called. The default is true. -// -// This facility exists to let servers enforce MAX_HEADER_LIST_SIZE -// while still decoding and keeping in-sync with decoder state, but -// without doing unnecessary decompression or generating unnecessary -// garbage for header fields past the limit. -func (d *Decoder) SetEmitEnabled(v bool) { d.emitEnabled = v } - -// EmitEnabled reports whether calls to the emitFunc provided to NewDecoder -// are currently enabled. The default is true. -func (d *Decoder) EmitEnabled() bool { return d.emitEnabled } - -// TODO: add method *Decoder.Reset(maxSize, emitFunc) to let callers re-use Decoders and their -// underlying buffers for garbage reasons. - -func (d *Decoder) SetMaxDynamicTableSize(v uint32) { - d.dynTab.setMaxSize(v) -} - -// SetAllowedMaxDynamicTableSize sets the upper bound that the encoded -// stream (via dynamic table size updates) may set the maximum size -// to. -func (d *Decoder) SetAllowedMaxDynamicTableSize(v uint32) { - d.dynTab.allowedMaxSize = v -} - -type dynamicTable struct { - // ents is the FIFO described at - // http://http2.github.io/http2-spec/compression.html#rfc.section.2.3.2 - // The newest (low index) is append at the end, and items are - // evicted from the front. - ents []HeaderField - size uint32 - maxSize uint32 // current maxSize - allowedMaxSize uint32 // maxSize may go up to this, inclusive -} - -func (dt *dynamicTable) setMaxSize(v uint32) { - dt.maxSize = v - dt.evict() -} - -// TODO: change dynamicTable to be a struct with a slice and a size int field, -// per http://http2.github.io/http2-spec/compression.html#rfc.section.4.1: -// -// -// Then make add increment the size. maybe the max size should move from Decoder to -// dynamicTable and add should return an ok bool if there was enough space. -// -// Later we'll need a remove operation on dynamicTable. - -func (dt *dynamicTable) add(f HeaderField) { - dt.ents = append(dt.ents, f) - dt.size += f.Size() - dt.evict() -} - -// If we're too big, evict old stuff (front of the slice) -func (dt *dynamicTable) evict() { - base := dt.ents // keep base pointer of slice - for dt.size > dt.maxSize { - dt.size -= dt.ents[0].Size() - dt.ents = dt.ents[1:] - } - - // Shift slice contents down if we evicted things. - if len(dt.ents) != len(base) { - copy(base, dt.ents) - dt.ents = base[:len(dt.ents)] - } -} - -// constantTimeStringCompare compares string a and b in a constant -// time manner. -func constantTimeStringCompare(a, b string) bool { - if len(a) != len(b) { - return false - } - - c := byte(0) - - for i := 0; i < len(a); i++ { - c |= a[i] ^ b[i] - } - - return c == 0 -} - -// Search searches f in the table. The return value i is 0 if there is -// no name match. If there is name match or name/value match, i is the -// index of that entry (1-based). If both name and value match, -// nameValueMatch becomes true. 
-func (dt *dynamicTable) search(f HeaderField) (i uint64, nameValueMatch bool) { - l := len(dt.ents) - for j := l - 1; j >= 0; j-- { - ent := dt.ents[j] - if !constantTimeStringCompare(ent.Name, f.Name) { - continue - } - if i == 0 { - i = uint64(l - j) - } - if f.Sensitive { - continue - } - if !constantTimeStringCompare(ent.Value, f.Value) { - continue - } - i = uint64(l - j) - nameValueMatch = true - return - } - return -} - -func (d *Decoder) maxTableIndex() int { - return len(d.dynTab.ents) + len(staticTable) -} - -func (d *Decoder) at(i uint64) (hf HeaderField, ok bool) { - if i < 1 { - return - } - if i > uint64(d.maxTableIndex()) { - return - } - if i <= uint64(len(staticTable)) { - return staticTable[i-1], true - } - dents := d.dynTab.ents - return dents[len(dents)-(int(i)-len(staticTable))], true -} - -// Decode decodes an entire block. -// -// TODO: remove this method and make it incremental later? This is -// easier for debugging now. -func (d *Decoder) DecodeFull(p []byte) ([]HeaderField, error) { - var hf []HeaderField - saveFunc := d.emit - defer func() { d.emit = saveFunc }() - d.emit = func(f HeaderField) { hf = append(hf, f) } - if _, err := d.Write(p); err != nil { - return nil, err - } - if err := d.Close(); err != nil { - return nil, err - } - return hf, nil -} - -func (d *Decoder) Close() error { - if d.saveBuf.Len() > 0 { - d.saveBuf.Reset() - return DecodingError{errors.New("truncated headers")} - } - return nil -} - -func (d *Decoder) Write(p []byte) (n int, err error) { - if len(p) == 0 { - // Prevent state machine CPU attacks (making us redo - // work up to the point of finding out we don't have - // enough data) - return - } - // Only copy the data if we have to. Optimistically assume - // that p will contain a complete header block. - if d.saveBuf.Len() == 0 { - d.buf = p - } else { - d.saveBuf.Write(p) - d.buf = d.saveBuf.Bytes() - d.saveBuf.Reset() - } - - for len(d.buf) > 0 { - err = d.parseHeaderFieldRepr() - if err == errNeedMore { - // Extra paranoia, making sure saveBuf won't - // get too large. All the varint and string - // reading code earlier should already catch - // overlong things and return ErrStringLength, - // but keep this as a last resort. - const varIntOverhead = 8 // conservative - if d.maxStrLen != 0 && int64(len(d.buf)) > 2*(int64(d.maxStrLen)+varIntOverhead) { - return 0, ErrStringLength - } - d.saveBuf.Write(d.buf) - return len(p), nil - } - if err != nil { - break - } - } - return len(p), err -} - -// errNeedMore is an internal sentinel error value that means the -// buffer is truncated and we need to read more data before we can -// continue parsing. -var errNeedMore = errors.New("need more data") - -type indexType int - -const ( - indexedTrue indexType = iota - indexedFalse - indexedNever -) - -func (v indexType) indexed() bool { return v == indexedTrue } -func (v indexType) sensitive() bool { return v == indexedNever } - -// returns errNeedMore if there isn't enough data available. -// any other error is fatal. -// consumes d.buf iff it returns nil. -// precondition: must be called with len(d.buf) > 0 -func (d *Decoder) parseHeaderFieldRepr() error { - b := d.buf[0] - switch { - case b&128 != 0: - // Indexed representation. - // High bit set? 
- // http://http2.github.io/http2-spec/compression.html#rfc.section.6.1 - return d.parseFieldIndexed() - case b&192 == 64: - // 6.2.1 Literal Header Field with Incremental Indexing - // 0b10xxxxxx: top two bits are 10 - // http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.1 - return d.parseFieldLiteral(6, indexedTrue) - case b&240 == 0: - // 6.2.2 Literal Header Field without Indexing - // 0b0000xxxx: top four bits are 0000 - // http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.2 - return d.parseFieldLiteral(4, indexedFalse) - case b&240 == 16: - // 6.2.3 Literal Header Field never Indexed - // 0b0001xxxx: top four bits are 0001 - // http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.3 - return d.parseFieldLiteral(4, indexedNever) - case b&224 == 32: - // 6.3 Dynamic Table Size Update - // Top three bits are '001'. - // http://http2.github.io/http2-spec/compression.html#rfc.section.6.3 - return d.parseDynamicTableSizeUpdate() - } - - return DecodingError{errors.New("invalid encoding")} -} - -// (same invariants and behavior as parseHeaderFieldRepr) -func (d *Decoder) parseFieldIndexed() error { - buf := d.buf - idx, buf, err := readVarInt(7, buf) - if err != nil { - return err - } - hf, ok := d.at(idx) - if !ok { - return DecodingError{InvalidIndexError(idx)} - } - d.buf = buf - return d.callEmit(HeaderField{Name: hf.Name, Value: hf.Value}) -} - -// (same invariants and behavior as parseHeaderFieldRepr) -func (d *Decoder) parseFieldLiteral(n uint8, it indexType) error { - buf := d.buf - nameIdx, buf, err := readVarInt(n, buf) - if err != nil { - return err - } - - var hf HeaderField - wantStr := d.emitEnabled || it.indexed() - if nameIdx > 0 { - ihf, ok := d.at(nameIdx) - if !ok { - return DecodingError{InvalidIndexError(nameIdx)} - } - hf.Name = ihf.Name - } else { - hf.Name, buf, err = d.readString(buf, wantStr) - if err != nil { - return err - } - } - hf.Value, buf, err = d.readString(buf, wantStr) - if err != nil { - return err - } - d.buf = buf - if it.indexed() { - d.dynTab.add(hf) - } - hf.Sensitive = it.sensitive() - return d.callEmit(hf) -} - -func (d *Decoder) callEmit(hf HeaderField) error { - if d.maxStrLen != 0 { - if len(hf.Name) > d.maxStrLen || len(hf.Value) > d.maxStrLen { - return ErrStringLength - } - } - if d.emitEnabled { - d.emit(hf) - } - return nil -} - -// (same invariants and behavior as parseHeaderFieldRepr) -func (d *Decoder) parseDynamicTableSizeUpdate() error { - buf := d.buf - size, buf, err := readVarInt(5, buf) - if err != nil { - return err - } - if size > uint64(d.dynTab.allowedMaxSize) { - return DecodingError{errors.New("dynamic table size update too large")} - } - d.dynTab.setMaxSize(uint32(size)) - d.buf = buf - return nil -} - -var errVarintOverflow = DecodingError{errors.New("varint integer overflow")} - -// readVarInt reads an unsigned variable length integer off the -// beginning of p. n is the parameter as described in -// http://http2.github.io/http2-spec/compression.html#rfc.section.5.1. -// -// n must always be between 1 and 8. -// -// The returned remain buffer is either a smaller suffix of p, or err != nil. -// The error is errNeedMore if p doesn't contain a complete integer. 
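The integer representation used by appendVarInt and readVarInt (an n-bit prefix, then continuation bytes carrying 7 bits each, per RFC 7541 section 5.1) round-trips as sketched below. This is a simplified standalone version for illustration, not the vendored functions, and it assumes well-formed input:

package main

import "fmt"

// encodeInt appends i using an n-bit prefix (1 <= n <= 8), per RFC 7541 section 5.1.
func encodeInt(dst []byte, n uint, i uint64) []byte {
	k := uint64(1<<n) - 1
	if i < k {
		return append(dst, byte(i))
	}
	dst = append(dst, byte(k))
	i -= k
	for i >= 128 {
		dst = append(dst, byte(0x80|(i&0x7f)))
		i >>= 7
	}
	return append(dst, byte(i))
}

// decodeInt reads an integer with an n-bit prefix and returns it plus the rest of p.
func decodeInt(p []byte, n uint) (uint64, []byte) {
	i := uint64(p[0]) & (uint64(1<<n) - 1)
	p = p[1:]
	if i < uint64(1<<n)-1 {
		return i, p
	}
	for m := uint(0); len(p) > 0; m += 7 {
		b := p[0]
		p = p[1:]
		i += uint64(b&0x7f) << m
		if b&0x80 == 0 {
			break
		}
	}
	return i, p
}

func main() {
	enc := encodeInt(nil, 5, 1337) // the worked example from RFC 7541 appendix C.1.2
	fmt.Printf("encoded: %x\n", enc) // 1f9a0a
	v, _ := decodeInt(enc, 5)
	fmt.Println("decoded:", v) // 1337
}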
-func readVarInt(n byte, p []byte) (i uint64, remain []byte, err error) { - if n < 1 || n > 8 { - panic("bad n") - } - if len(p) == 0 { - return 0, p, errNeedMore - } - i = uint64(p[0]) - if n < 8 { - i &= (1 << uint64(n)) - 1 - } - if i < (1< 0 { - b := p[0] - p = p[1:] - i += uint64(b&127) << m - if b&128 == 0 { - return i, p, nil - } - m += 7 - if m >= 63 { // TODO: proper overflow check. making this up. - return 0, origP, errVarintOverflow - } - } - return 0, origP, errNeedMore -} - -// readString decodes an hpack string from p. -// -// wantStr is whether s will be used. If false, decompression and -// []byte->string garbage are skipped if s will be ignored -// anyway. This does mean that huffman decoding errors for non-indexed -// strings past the MAX_HEADER_LIST_SIZE are ignored, but the server -// is returning an error anyway, and because they're not indexed, the error -// won't affect the decoding state. -func (d *Decoder) readString(p []byte, wantStr bool) (s string, remain []byte, err error) { - if len(p) == 0 { - return "", p, errNeedMore - } - isHuff := p[0]&128 != 0 - strLen, p, err := readVarInt(7, p) - if err != nil { - return "", p, err - } - if d.maxStrLen != 0 && strLen > uint64(d.maxStrLen) { - return "", nil, ErrStringLength - } - if uint64(len(p)) < strLen { - return "", p, errNeedMore - } - if !isHuff { - if wantStr { - s = string(p[:strLen]) - } - return s, p[strLen:], nil - } - - if wantStr { - buf := bufPool.Get().(*bytes.Buffer) - buf.Reset() // don't trust others - defer bufPool.Put(buf) - if err := huffmanDecode(buf, d.maxStrLen, p[:strLen]); err != nil { - buf.Reset() - return "", nil, err - } - s = buf.String() - buf.Reset() // be nice to GC - } - return s, p[strLen:], nil -} diff --git a/vendor/golang.org/x/net/http2/hpack/huffman.go b/vendor/golang.org/x/net/http2/hpack/huffman.go deleted file mode 100644 index 8850e39467..0000000000 --- a/vendor/golang.org/x/net/http2/hpack/huffman.go +++ /dev/null @@ -1,212 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package hpack - -import ( - "bytes" - "errors" - "io" - "sync" -) - -var bufPool = sync.Pool{ - New: func() interface{} { return new(bytes.Buffer) }, -} - -// HuffmanDecode decodes the string in v and writes the expanded -// result to w, returning the number of bytes written to w and the -// Write call's return value. At most one Write call is made. -func HuffmanDecode(w io.Writer, v []byte) (int, error) { - buf := bufPool.Get().(*bytes.Buffer) - buf.Reset() - defer bufPool.Put(buf) - if err := huffmanDecode(buf, 0, v); err != nil { - return 0, err - } - return w.Write(buf.Bytes()) -} - -// HuffmanDecodeToString decodes the string in v. -func HuffmanDecodeToString(v []byte) (string, error) { - buf := bufPool.Get().(*bytes.Buffer) - buf.Reset() - defer bufPool.Put(buf) - if err := huffmanDecode(buf, 0, v); err != nil { - return "", err - } - return buf.String(), nil -} - -// ErrInvalidHuffman is returned for errors found decoding -// Huffman-encoded strings. -var ErrInvalidHuffman = errors.New("hpack: invalid Huffman-encoded data") - -// huffmanDecode decodes v to buf. -// If maxLen is greater than 0, attempts to write more to buf than -// maxLen bytes will return ErrStringLength. -func huffmanDecode(buf *bytes.Buffer, maxLen int, v []byte) error { - n := rootHuffmanNode - // cur is the bit buffer that has not been fed into n. 
- // cbits is the number of low order bits in cur that are valid. - // sbits is the number of bits of the symbol prefix being decoded. - cur, cbits, sbits := uint(0), uint8(0), uint8(0) - for _, b := range v { - cur = cur<<8 | uint(b) - cbits += 8 - sbits += 8 - for cbits >= 8 { - idx := byte(cur >> (cbits - 8)) - n = n.children[idx] - if n == nil { - return ErrInvalidHuffman - } - if n.children == nil { - if maxLen != 0 && buf.Len() == maxLen { - return ErrStringLength - } - buf.WriteByte(n.sym) - cbits -= n.codeLen - n = rootHuffmanNode - sbits = cbits - } else { - cbits -= 8 - } - } - } - for cbits > 0 { - n = n.children[byte(cur<<(8-cbits))] - if n == nil { - return ErrInvalidHuffman - } - if n.children != nil || n.codeLen > cbits { - break - } - if maxLen != 0 && buf.Len() == maxLen { - return ErrStringLength - } - buf.WriteByte(n.sym) - cbits -= n.codeLen - n = rootHuffmanNode - sbits = cbits - } - if sbits > 7 { - // Either there was an incomplete symbol, or overlong padding. - // Both are decoding errors per RFC 7541 section 5.2. - return ErrInvalidHuffman - } - if mask := uint(1< 8 { - codeLen -= 8 - i := uint8(code >> codeLen) - if cur.children[i] == nil { - cur.children[i] = newInternalNode() - } - cur = cur.children[i] - } - shift := 8 - codeLen - start, end := int(uint8(code<> (nbits - rembits)) - dst[len(dst)-1] |= t - } - - return dst -} - -// HuffmanEncodeLength returns the number of bytes required to encode -// s in Huffman codes. The result is round up to byte boundary. -func HuffmanEncodeLength(s string) uint64 { - n := uint64(0) - for i := 0; i < len(s); i++ { - n += uint64(huffmanCodeLen[s[i]]) - } - return (n + 7) / 8 -} - -// appendByteToHuffmanCode appends Huffman code for c to dst and -// returns the extended buffer and the remaining bits in the last -// element. The appending is not byte aligned and the remaining bits -// in the last element of dst is given in rembits. -func appendByteToHuffmanCode(dst []byte, rembits uint8, c byte) ([]byte, uint8) { - code := huffmanCodes[c] - nbits := huffmanCodeLen[c] - - for { - if rembits > nbits { - t := uint8(code << (rembits - nbits)) - dst[len(dst)-1] |= t - rembits -= nbits - break - } - - t := uint8(code >> (nbits - rembits)) - dst[len(dst)-1] |= t - - nbits -= rembits - rembits = 8 - - if nbits == 0 { - break - } - - dst = append(dst, 0) - } - - return dst, rembits -} diff --git a/vendor/golang.org/x/net/http2/hpack/tables.go b/vendor/golang.org/x/net/http2/hpack/tables.go deleted file mode 100644 index b9283a0233..0000000000 --- a/vendor/golang.org/x/net/http2/hpack/tables.go +++ /dev/null @@ -1,352 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
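The Huffman coder above is exposed through AppendHuffmanString, HuffmanEncodeLength and HuffmanDecodeToString. A small round-trip usage sketch; the sample string is arbitrary:

package main

import (
	"fmt"

	"golang.org/x/net/http2/hpack"
)

func main() {
	const s = "www.example.com" // arbitrary sample value
	fmt.Println("raw length:    ", len(s))
	fmt.Println("huffman length:", hpack.HuffmanEncodeLength(s))

	encoded := hpack.AppendHuffmanString(nil, s)
	decoded, err := hpack.HuffmanDecodeToString(encoded)
	if err != nil {
		panic(err)
	}
	fmt.Printf("encoded: %x\ndecoded: %q\n", encoded, decoded)
}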
- -package hpack - -func pair(name, value string) HeaderField { - return HeaderField{Name: name, Value: value} -} - -// http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-07#appendix-B -var staticTable = [...]HeaderField{ - pair(":authority", ""), // index 1 (1-based) - pair(":method", "GET"), - pair(":method", "POST"), - pair(":path", "/"), - pair(":path", "/index.html"), - pair(":scheme", "http"), - pair(":scheme", "https"), - pair(":status", "200"), - pair(":status", "204"), - pair(":status", "206"), - pair(":status", "304"), - pair(":status", "400"), - pair(":status", "404"), - pair(":status", "500"), - pair("accept-charset", ""), - pair("accept-encoding", "gzip, deflate"), - pair("accept-language", ""), - pair("accept-ranges", ""), - pair("accept", ""), - pair("access-control-allow-origin", ""), - pair("age", ""), - pair("allow", ""), - pair("authorization", ""), - pair("cache-control", ""), - pair("content-disposition", ""), - pair("content-encoding", ""), - pair("content-language", ""), - pair("content-length", ""), - pair("content-location", ""), - pair("content-range", ""), - pair("content-type", ""), - pair("cookie", ""), - pair("date", ""), - pair("etag", ""), - pair("expect", ""), - pair("expires", ""), - pair("from", ""), - pair("host", ""), - pair("if-match", ""), - pair("if-modified-since", ""), - pair("if-none-match", ""), - pair("if-range", ""), - pair("if-unmodified-since", ""), - pair("last-modified", ""), - pair("link", ""), - pair("location", ""), - pair("max-forwards", ""), - pair("proxy-authenticate", ""), - pair("proxy-authorization", ""), - pair("range", ""), - pair("referer", ""), - pair("refresh", ""), - pair("retry-after", ""), - pair("server", ""), - pair("set-cookie", ""), - pair("strict-transport-security", ""), - pair("transfer-encoding", ""), - pair("user-agent", ""), - pair("vary", ""), - pair("via", ""), - pair("www-authenticate", ""), -} - -var huffmanCodes = [256]uint32{ - 0x1ff8, - 0x7fffd8, - 0xfffffe2, - 0xfffffe3, - 0xfffffe4, - 0xfffffe5, - 0xfffffe6, - 0xfffffe7, - 0xfffffe8, - 0xffffea, - 0x3ffffffc, - 0xfffffe9, - 0xfffffea, - 0x3ffffffd, - 0xfffffeb, - 0xfffffec, - 0xfffffed, - 0xfffffee, - 0xfffffef, - 0xffffff0, - 0xffffff1, - 0xffffff2, - 0x3ffffffe, - 0xffffff3, - 0xffffff4, - 0xffffff5, - 0xffffff6, - 0xffffff7, - 0xffffff8, - 0xffffff9, - 0xffffffa, - 0xffffffb, - 0x14, - 0x3f8, - 0x3f9, - 0xffa, - 0x1ff9, - 0x15, - 0xf8, - 0x7fa, - 0x3fa, - 0x3fb, - 0xf9, - 0x7fb, - 0xfa, - 0x16, - 0x17, - 0x18, - 0x0, - 0x1, - 0x2, - 0x19, - 0x1a, - 0x1b, - 0x1c, - 0x1d, - 0x1e, - 0x1f, - 0x5c, - 0xfb, - 0x7ffc, - 0x20, - 0xffb, - 0x3fc, - 0x1ffa, - 0x21, - 0x5d, - 0x5e, - 0x5f, - 0x60, - 0x61, - 0x62, - 0x63, - 0x64, - 0x65, - 0x66, - 0x67, - 0x68, - 0x69, - 0x6a, - 0x6b, - 0x6c, - 0x6d, - 0x6e, - 0x6f, - 0x70, - 0x71, - 0x72, - 0xfc, - 0x73, - 0xfd, - 0x1ffb, - 0x7fff0, - 0x1ffc, - 0x3ffc, - 0x22, - 0x7ffd, - 0x3, - 0x23, - 0x4, - 0x24, - 0x5, - 0x25, - 0x26, - 0x27, - 0x6, - 0x74, - 0x75, - 0x28, - 0x29, - 0x2a, - 0x7, - 0x2b, - 0x76, - 0x2c, - 0x8, - 0x9, - 0x2d, - 0x77, - 0x78, - 0x79, - 0x7a, - 0x7b, - 0x7ffe, - 0x7fc, - 0x3ffd, - 0x1ffd, - 0xffffffc, - 0xfffe6, - 0x3fffd2, - 0xfffe7, - 0xfffe8, - 0x3fffd3, - 0x3fffd4, - 0x3fffd5, - 0x7fffd9, - 0x3fffd6, - 0x7fffda, - 0x7fffdb, - 0x7fffdc, - 0x7fffdd, - 0x7fffde, - 0xffffeb, - 0x7fffdf, - 0xffffec, - 0xffffed, - 0x3fffd7, - 0x7fffe0, - 0xffffee, - 0x7fffe1, - 0x7fffe2, - 0x7fffe3, - 0x7fffe4, - 0x1fffdc, - 0x3fffd8, - 0x7fffe5, - 0x3fffd9, - 0x7fffe6, - 0x7fffe7, - 0xffffef, - 
0x3fffda, - 0x1fffdd, - 0xfffe9, - 0x3fffdb, - 0x3fffdc, - 0x7fffe8, - 0x7fffe9, - 0x1fffde, - 0x7fffea, - 0x3fffdd, - 0x3fffde, - 0xfffff0, - 0x1fffdf, - 0x3fffdf, - 0x7fffeb, - 0x7fffec, - 0x1fffe0, - 0x1fffe1, - 0x3fffe0, - 0x1fffe2, - 0x7fffed, - 0x3fffe1, - 0x7fffee, - 0x7fffef, - 0xfffea, - 0x3fffe2, - 0x3fffe3, - 0x3fffe4, - 0x7ffff0, - 0x3fffe5, - 0x3fffe6, - 0x7ffff1, - 0x3ffffe0, - 0x3ffffe1, - 0xfffeb, - 0x7fff1, - 0x3fffe7, - 0x7ffff2, - 0x3fffe8, - 0x1ffffec, - 0x3ffffe2, - 0x3ffffe3, - 0x3ffffe4, - 0x7ffffde, - 0x7ffffdf, - 0x3ffffe5, - 0xfffff1, - 0x1ffffed, - 0x7fff2, - 0x1fffe3, - 0x3ffffe6, - 0x7ffffe0, - 0x7ffffe1, - 0x3ffffe7, - 0x7ffffe2, - 0xfffff2, - 0x1fffe4, - 0x1fffe5, - 0x3ffffe8, - 0x3ffffe9, - 0xffffffd, - 0x7ffffe3, - 0x7ffffe4, - 0x7ffffe5, - 0xfffec, - 0xfffff3, - 0xfffed, - 0x1fffe6, - 0x3fffe9, - 0x1fffe7, - 0x1fffe8, - 0x7ffff3, - 0x3fffea, - 0x3fffeb, - 0x1ffffee, - 0x1ffffef, - 0xfffff4, - 0xfffff5, - 0x3ffffea, - 0x7ffff4, - 0x3ffffeb, - 0x7ffffe6, - 0x3ffffec, - 0x3ffffed, - 0x7ffffe7, - 0x7ffffe8, - 0x7ffffe9, - 0x7ffffea, - 0x7ffffeb, - 0xffffffe, - 0x7ffffec, - 0x7ffffed, - 0x7ffffee, - 0x7ffffef, - 0x7fffff0, - 0x3ffffee, -} - -var huffmanCodeLen = [256]uint8{ - 13, 23, 28, 28, 28, 28, 28, 28, 28, 24, 30, 28, 28, 30, 28, 28, - 28, 28, 28, 28, 28, 28, 30, 28, 28, 28, 28, 28, 28, 28, 28, 28, - 6, 10, 10, 12, 13, 6, 8, 11, 10, 10, 8, 11, 8, 6, 6, 6, - 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 7, 8, 15, 6, 12, 10, - 13, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, - 7, 7, 7, 7, 7, 7, 7, 7, 8, 7, 8, 13, 19, 13, 14, 6, - 15, 5, 6, 5, 6, 5, 6, 6, 6, 5, 7, 7, 6, 6, 6, 5, - 6, 7, 6, 5, 5, 6, 7, 7, 7, 7, 7, 15, 11, 14, 13, 28, - 20, 22, 20, 20, 22, 22, 22, 23, 22, 23, 23, 23, 23, 23, 24, 23, - 24, 24, 22, 23, 24, 23, 23, 23, 23, 21, 22, 23, 22, 23, 23, 24, - 22, 21, 20, 22, 22, 23, 23, 21, 23, 22, 22, 24, 21, 22, 23, 23, - 21, 21, 22, 21, 23, 22, 23, 23, 20, 22, 22, 22, 23, 22, 22, 23, - 26, 26, 20, 19, 22, 23, 22, 25, 26, 26, 26, 27, 27, 26, 24, 25, - 19, 21, 26, 27, 27, 26, 27, 24, 21, 21, 26, 26, 28, 27, 27, 27, - 20, 24, 20, 21, 22, 21, 21, 23, 22, 22, 25, 25, 24, 24, 26, 23, - 26, 27, 26, 26, 27, 27, 27, 27, 27, 28, 27, 27, 27, 27, 27, 26, -} diff --git a/vendor/golang.org/x/net/http2/http2.go b/vendor/golang.org/x/net/http2/http2.go deleted file mode 100644 index 401923bc7c..0000000000 --- a/vendor/golang.org/x/net/http2/http2.go +++ /dev/null @@ -1,362 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package http2 implements the HTTP/2 protocol. -// -// This package is low-level and intended to be used directly by very -// few people. Most users will use it indirectly through the automatic -// use by the net/http package (from Go 1.6 and later). -// For use in earlier Go versions see ConfigureServer. (Transport support -// requires Go 1.6 or later) -// -// See https://http2.github.io/ for more information on HTTP/2. -// -// See https://http2.golang.org/ for a test server running this code. 
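As the package comment above notes, net/http uses this package automatically from Go 1.6 on; for explicit wiring, ConfigureServer can be called directly. A hedged sketch, with a placeholder handler and certificate paths:

package main

import (
	"io"
	"log"
	"net/http"

	"golang.org/x/net/http2"
)

func main() {
	srv := &http.Server{
		Addr: ":8443",
		Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			io.WriteString(w, "served over "+r.Proto+"\n")
		}),
	}
	// Explicitly enable HTTP/2 on srv; on Go 1.6+ net/http does this on its own.
	if err := http2.ConfigureServer(srv, &http2.Server{}); err != nil {
		log.Fatal(err)
	}
	// cert.pem and key.pem are placeholder paths.
	log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem"))
}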
-// -package http2 // import "golang.org/x/net/http2" - -import ( - "bufio" - "crypto/tls" - "errors" - "fmt" - "io" - "net/http" - "os" - "sort" - "strconv" - "strings" - "sync" - - "golang.org/x/net/lex/httplex" -) - -var ( - VerboseLogs bool - logFrameWrites bool - logFrameReads bool -) - -func init() { - e := os.Getenv("GODEBUG") - if strings.Contains(e, "http2debug=1") { - VerboseLogs = true - } - if strings.Contains(e, "http2debug=2") { - VerboseLogs = true - logFrameWrites = true - logFrameReads = true - } -} - -const ( - // ClientPreface is the string that must be sent by new - // connections from clients. - ClientPreface = "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n" - - // SETTINGS_MAX_FRAME_SIZE default - // http://http2.github.io/http2-spec/#rfc.section.6.5.2 - initialMaxFrameSize = 16384 - - // NextProtoTLS is the NPN/ALPN protocol negotiated during - // HTTP/2's TLS setup. - NextProtoTLS = "h2" - - // http://http2.github.io/http2-spec/#SettingValues - initialHeaderTableSize = 4096 - - initialWindowSize = 65535 // 6.9.2 Initial Flow Control Window Size - - defaultMaxReadFrameSize = 1 << 20 -) - -var ( - clientPreface = []byte(ClientPreface) -) - -type streamState int - -const ( - stateIdle streamState = iota - stateOpen - stateHalfClosedLocal - stateHalfClosedRemote - stateResvLocal - stateResvRemote - stateClosed -) - -var stateName = [...]string{ - stateIdle: "Idle", - stateOpen: "Open", - stateHalfClosedLocal: "HalfClosedLocal", - stateHalfClosedRemote: "HalfClosedRemote", - stateResvLocal: "ResvLocal", - stateResvRemote: "ResvRemote", - stateClosed: "Closed", -} - -func (st streamState) String() string { - return stateName[st] -} - -// Setting is a setting parameter: which setting it is, and its value. -type Setting struct { - // ID is which setting is being set. - // See http://http2.github.io/http2-spec/#SettingValues - ID SettingID - - // Val is the value. - Val uint32 -} - -func (s Setting) String() string { - return fmt.Sprintf("[%v = %d]", s.ID, s.Val) -} - -// Valid reports whether the setting is valid. 
-func (s Setting) Valid() error { - // Limits and error codes from 6.5.2 Defined SETTINGS Parameters - switch s.ID { - case SettingEnablePush: - if s.Val != 1 && s.Val != 0 { - return ConnectionError(ErrCodeProtocol) - } - case SettingInitialWindowSize: - if s.Val > 1<<31-1 { - return ConnectionError(ErrCodeFlowControl) - } - case SettingMaxFrameSize: - if s.Val < 16384 || s.Val > 1<<24-1 { - return ConnectionError(ErrCodeProtocol) - } - } - return nil -} - -// A SettingID is an HTTP/2 setting as defined in -// http://http2.github.io/http2-spec/#iana-settings -type SettingID uint16 - -const ( - SettingHeaderTableSize SettingID = 0x1 - SettingEnablePush SettingID = 0x2 - SettingMaxConcurrentStreams SettingID = 0x3 - SettingInitialWindowSize SettingID = 0x4 - SettingMaxFrameSize SettingID = 0x5 - SettingMaxHeaderListSize SettingID = 0x6 -) - -var settingName = map[SettingID]string{ - SettingHeaderTableSize: "HEADER_TABLE_SIZE", - SettingEnablePush: "ENABLE_PUSH", - SettingMaxConcurrentStreams: "MAX_CONCURRENT_STREAMS", - SettingInitialWindowSize: "INITIAL_WINDOW_SIZE", - SettingMaxFrameSize: "MAX_FRAME_SIZE", - SettingMaxHeaderListSize: "MAX_HEADER_LIST_SIZE", -} - -func (s SettingID) String() string { - if v, ok := settingName[s]; ok { - return v - } - return fmt.Sprintf("UNKNOWN_SETTING_%d", uint16(s)) -} - -var ( - errInvalidHeaderFieldName = errors.New("http2: invalid header field name") - errInvalidHeaderFieldValue = errors.New("http2: invalid header field value") -) - -// validWireHeaderFieldName reports whether v is a valid header field -// name (key). See httplex.ValidHeaderName for the base rules. -// -// Further, http2 says: -// "Just as in HTTP/1.x, header field names are strings of ASCII -// characters that are compared in a case-insensitive -// fashion. However, header field names MUST be converted to -// lowercase prior to their encoding in HTTP/2. " -func validWireHeaderFieldName(v string) bool { - if len(v) == 0 { - return false - } - for _, r := range v { - if !httplex.IsTokenRune(r) { - return false - } - if 'A' <= r && r <= 'Z' { - return false - } - } - return true -} - -var httpCodeStringCommon = map[int]string{} // n -> strconv.Itoa(n) - -func init() { - for i := 100; i <= 999; i++ { - if v := http.StatusText(i); v != "" { - httpCodeStringCommon[i] = strconv.Itoa(i) - } - } -} - -func httpCodeString(code int) string { - if s, ok := httpCodeStringCommon[code]; ok { - return s - } - return strconv.Itoa(code) -} - -// from pkg io -type stringWriter interface { - WriteString(s string) (n int, err error) -} - -// A gate lets two goroutines coordinate their activities. -type gate chan struct{} - -func (g gate) Done() { g <- struct{}{} } -func (g gate) Wait() { <-g } - -// A closeWaiter is like a sync.WaitGroup but only goes 1 to 0 (open to closed). -type closeWaiter chan struct{} - -// Init makes a closeWaiter usable. -// It exists because so a closeWaiter value can be placed inside a -// larger struct and have the Mutex and Cond's memory in the same -// allocation. -func (cw *closeWaiter) Init() { - *cw = make(chan struct{}) -} - -// Close marks the closeWaiter as closed and unblocks any waiters. -func (cw closeWaiter) Close() { - close(cw) -} - -// Wait waits for the closeWaiter to become closed. -func (cw closeWaiter) Wait() { - <-cw -} - -// bufferedWriter is a buffered writer that writes to w. -// Its buffered writer is lazily allocated as needed, to minimize -// idle memory usage with many connections. 
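validWireHeaderFieldName above encodes the HTTP/2 rule that header names on the wire must be lowercase tokens. A tiny standalone illustration; the check below is deliberately simplified to lowercase letters, digits and hyphens rather than the full token rule:

package main

import "fmt"

// lowerTokenName is a simplified stand-in for validWireHeaderFieldName above.
func lowerTokenName(v string) bool {
	if v == "" {
		return false
	}
	for _, r := range v {
		switch {
		case 'a' <= r && r <= 'z', '0' <= r && r <= '9', r == '-':
		default:
			return false
		}
	}
	return true
}

func main() {
	for _, name := range []string{"content-type", "Content-Type", "x-trace-id", "bad name"} {
		fmt.Printf("%-14q valid on the wire: %v\n", name, lowerTokenName(name))
	}
}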
-type bufferedWriter struct { - w io.Writer // immutable - bw *bufio.Writer // non-nil when data is buffered -} - -func newBufferedWriter(w io.Writer) *bufferedWriter { - return &bufferedWriter{w: w} -} - -var bufWriterPool = sync.Pool{ - New: func() interface{} { - // TODO: pick something better? this is a bit under - // (3 x typical 1500 byte MTU) at least. - return bufio.NewWriterSize(nil, 4<<10) - }, -} - -func (w *bufferedWriter) Write(p []byte) (n int, err error) { - if w.bw == nil { - bw := bufWriterPool.Get().(*bufio.Writer) - bw.Reset(w.w) - w.bw = bw - } - return w.bw.Write(p) -} - -func (w *bufferedWriter) Flush() error { - bw := w.bw - if bw == nil { - return nil - } - err := bw.Flush() - bw.Reset(nil) - bufWriterPool.Put(bw) - w.bw = nil - return err -} - -func mustUint31(v int32) uint32 { - if v < 0 || v > 2147483647 { - panic("out of range") - } - return uint32(v) -} - -// bodyAllowedForStatus reports whether a given response status code -// permits a body. See RFC 2616, section 4.4. -func bodyAllowedForStatus(status int) bool { - switch { - case status >= 100 && status <= 199: - return false - case status == 204: - return false - case status == 304: - return false - } - return true -} - -type httpError struct { - msg string - timeout bool -} - -func (e *httpError) Error() string { return e.msg } -func (e *httpError) Timeout() bool { return e.timeout } -func (e *httpError) Temporary() bool { return true } - -var errTimeout error = &httpError{msg: "http2: timeout awaiting response headers", timeout: true} - -type connectionStater interface { - ConnectionState() tls.ConnectionState -} - -var sorterPool = sync.Pool{New: func() interface{} { return new(sorter) }} - -type sorter struct { - v []string // owned by sorter -} - -func (s *sorter) Len() int { return len(s.v) } -func (s *sorter) Swap(i, j int) { s.v[i], s.v[j] = s.v[j], s.v[i] } -func (s *sorter) Less(i, j int) bool { return s.v[i] < s.v[j] } - -// Keys returns the sorted keys of h. -// -// The returned slice is only valid until s used again or returned to -// its pool. -func (s *sorter) Keys(h http.Header) []string { - keys := s.v[:0] - for k := range h { - keys = append(keys, k) - } - s.v = keys - sort.Sort(s) - return keys -} - -func (s *sorter) SortStrings(ss []string) { - // Our sorter works on s.v, which sorter owners, so - // stash it away while we sort the user's buffer. - save := s.v - s.v = ss - sort.Sort(s) - s.v = save -} - -// validPseudoPath reports whether v is a valid :path pseudo-header -// value. It must be a non-empty string starting with '/', and not -// start with two slashes. -// For now this is only used a quick check for deciding when to clean -// up Opaque URLs before sending requests from the Transport. -// See golang.org/issue/16847 -func validPseudoPath(v string) bool { - return len(v) > 0 && v[0] == '/' && (len(v) == 1 || v[1] != '/') -} diff --git a/vendor/golang.org/x/net/http2/not_go16.go b/vendor/golang.org/x/net/http2/not_go16.go deleted file mode 100644 index efd2e1282c..0000000000 --- a/vendor/golang.org/x/net/http2/not_go16.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
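bufferedWriter above keeps per-connection idle memory low by borrowing a bufio.Writer from a sync.Pool only while data is buffered and returning it on Flush. The same pattern in a standalone sketch; the type name and buffer size are illustrative:

package main

import (
	"bufio"
	"fmt"
	"io"
	"os"
	"sync"
)

var writerPool = sync.Pool{
	New: func() interface{} { return bufio.NewWriterSize(nil, 4<<10) },
}

type lazyWriter struct {
	w  io.Writer     // the underlying destination
	bw *bufio.Writer // borrowed from writerPool only while data is buffered
}

func (l *lazyWriter) Write(p []byte) (int, error) {
	if l.bw == nil {
		l.bw = writerPool.Get().(*bufio.Writer)
		l.bw.Reset(l.w)
	}
	return l.bw.Write(p)
}

func (l *lazyWriter) Flush() error {
	if l.bw == nil {
		return nil
	}
	err := l.bw.Flush()
	l.bw.Reset(nil) // drop the reference so the pooled writer can't pin l.w
	writerPool.Put(l.bw)
	l.bw = nil
	return err
}

func main() {
	lw := &lazyWriter{w: os.Stdout}
	fmt.Fprintln(lw, "buffered hello")
	lw.Flush()
}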
- -// +build !go1.6 - -package http2 - -import ( - "crypto/tls" - "net/http" - "time" -) - -func configureTransport(t1 *http.Transport) (*Transport, error) { - return nil, errTransportVersion -} - -func transportExpectContinueTimeout(t1 *http.Transport) time.Duration { - return 0 - -} - -// isBadCipher reports whether the cipher is blacklisted by the HTTP/2 spec. -func isBadCipher(cipher uint16) bool { - switch cipher { - case tls.TLS_RSA_WITH_RC4_128_SHA, - tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA, - tls.TLS_RSA_WITH_AES_128_CBC_SHA, - tls.TLS_RSA_WITH_AES_256_CBC_SHA, - tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, - tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, - tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, - tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA, - tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, - tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, - tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA: - // Reject cipher suites from Appendix A. - // "This list includes those cipher suites that do not - // offer an ephemeral key exchange and those that are - // based on the TLS null, stream or block cipher type" - return true - default: - return false - } -} diff --git a/vendor/golang.org/x/net/http2/not_go17.go b/vendor/golang.org/x/net/http2/not_go17.go deleted file mode 100644 index 28df0c16bf..0000000000 --- a/vendor/golang.org/x/net/http2/not_go17.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !go1.7 - -package http2 - -import ( - "net" - "net/http" -) - -type contextContext interface{} - -type fakeContext struct{} - -func (fakeContext) Done() <-chan struct{} { return nil } -func (fakeContext) Err() error { panic("should not be called") } - -func reqContext(r *http.Request) fakeContext { - return fakeContext{} -} - -func setResponseUncompressed(res *http.Response) { - // Nothing. -} - -type clientTrace struct{} - -func requestTrace(*http.Request) *clientTrace { return nil } -func traceGotConn(*http.Request, *ClientConn) {} -func traceFirstResponseByte(*clientTrace) {} -func traceWroteHeaders(*clientTrace) {} -func traceWroteRequest(*clientTrace, error) {} -func traceGot100Continue(trace *clientTrace) {} -func traceWait100Continue(trace *clientTrace) {} - -func nop() {} - -func serverConnBaseContext(c net.Conn, opts *ServeConnOpts) (ctx contextContext, cancel func()) { - return nil, nop -} - -func contextWithCancel(ctx contextContext) (_ contextContext, cancel func()) { - return ctx, nop -} - -func requestWithContext(req *http.Request, ctx contextContext) *http.Request { - return req -} diff --git a/vendor/golang.org/x/net/http2/pipe.go b/vendor/golang.org/x/net/http2/pipe.go deleted file mode 100644 index 53b7a1daf6..0000000000 --- a/vendor/golang.org/x/net/http2/pipe.go +++ /dev/null @@ -1,153 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package http2 - -import ( - "errors" - "io" - "sync" -) - -// pipe is a goroutine-safe io.Reader/io.Writer pair. It's like -// io.Pipe except there are no PipeReader/PipeWriter halves, and the -// underlying buffer is an interface. (io.Pipe is always unbuffered) -type pipe struct { - mu sync.Mutex - c sync.Cond // c.L lazily initialized to &p.mu - b pipeBuffer - err error // read error once empty. non-nil means closed. 
- breakErr error // immediate read error (caller doesn't see rest of b) - donec chan struct{} // closed on error - readFn func() // optional code to run in Read before error -} - -type pipeBuffer interface { - Len() int - io.Writer - io.Reader -} - -func (p *pipe) Len() int { - p.mu.Lock() - defer p.mu.Unlock() - return p.b.Len() -} - -// Read waits until data is available and copies bytes -// from the buffer into p. -func (p *pipe) Read(d []byte) (n int, err error) { - p.mu.Lock() - defer p.mu.Unlock() - if p.c.L == nil { - p.c.L = &p.mu - } - for { - if p.breakErr != nil { - return 0, p.breakErr - } - if p.b.Len() > 0 { - return p.b.Read(d) - } - if p.err != nil { - if p.readFn != nil { - p.readFn() // e.g. copy trailers - p.readFn = nil // not sticky like p.err - } - return 0, p.err - } - p.c.Wait() - } -} - -var errClosedPipeWrite = errors.New("write on closed buffer") - -// Write copies bytes from p into the buffer and wakes a reader. -// It is an error to write more data than the buffer can hold. -func (p *pipe) Write(d []byte) (n int, err error) { - p.mu.Lock() - defer p.mu.Unlock() - if p.c.L == nil { - p.c.L = &p.mu - } - defer p.c.Signal() - if p.err != nil { - return 0, errClosedPipeWrite - } - return p.b.Write(d) -} - -// CloseWithError causes the next Read (waking up a current blocked -// Read if needed) to return the provided err after all data has been -// read. -// -// The error must be non-nil. -func (p *pipe) CloseWithError(err error) { p.closeWithError(&p.err, err, nil) } - -// BreakWithError causes the next Read (waking up a current blocked -// Read if needed) to return the provided err immediately, without -// waiting for unread data. -func (p *pipe) BreakWithError(err error) { p.closeWithError(&p.breakErr, err, nil) } - -// closeWithErrorAndCode is like CloseWithError but also sets some code to run -// in the caller's goroutine before returning the error. -func (p *pipe) closeWithErrorAndCode(err error, fn func()) { p.closeWithError(&p.err, err, fn) } - -func (p *pipe) closeWithError(dst *error, err error, fn func()) { - if err == nil { - panic("err must be non-nil") - } - p.mu.Lock() - defer p.mu.Unlock() - if p.c.L == nil { - p.c.L = &p.mu - } - defer p.c.Signal() - if *dst != nil { - // Already been done. - return - } - p.readFn = fn - *dst = err - p.closeDoneLocked() -} - -// requires p.mu be held. -func (p *pipe) closeDoneLocked() { - if p.donec == nil { - return - } - // Close if unclosed. This isn't racy since we always - // hold p.mu while closing. - select { - case <-p.donec: - default: - close(p.donec) - } -} - -// Err returns the error (if any) first set by BreakWithError or CloseWithError. -func (p *pipe) Err() error { - p.mu.Lock() - defer p.mu.Unlock() - if p.breakErr != nil { - return p.breakErr - } - return p.err -} - -// Done returns a channel which is closed if and when this pipe is closed -// with CloseWithError. -func (p *pipe) Done() <-chan struct{} { - p.mu.Lock() - defer p.mu.Unlock() - if p.donec == nil { - p.donec = make(chan struct{}) - if p.err != nil || p.breakErr != nil { - // Already hit an error. - p.closeDoneLocked() - } - } - return p.donec -} diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go deleted file mode 100644 index 8206fa79d8..0000000000 --- a/vendor/golang.org/x/net/http2/server.go +++ /dev/null @@ -1,2292 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
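The pipe type removed above is a mutex-plus-sync.Cond buffer: Read loops on the condition variable until data or a close error arrives, and Write signals after appending. A simplified, self-contained sketch of the same pattern follows; condPipe is a made-up name, and the real type's breakErr, donec, and readFn features are omitted here.

package main

import (
	"bytes"
	"errors"
	"fmt"
	"io"
	"sync"
)

// condPipe is a simplified take on the pipe pattern above: a mutex-guarded
// buffer whose Read blocks on a sync.Cond until data arrives or the pipe
// has been closed with an error.
type condPipe struct {
	mu  sync.Mutex
	c   sync.Cond // c.L lazily initialized to &mu, as in the deleted code
	b   bytes.Buffer
	err error // sticky close error, returned once the buffer drains
}

// lazyInit must be called with mu held.
func (p *condPipe) lazyInit() {
	if p.c.L == nil {
		p.c.L = &p.mu
	}
}

func (p *condPipe) Read(d []byte) (int, error) {
	p.mu.Lock()
	defer p.mu.Unlock()
	p.lazyInit()
	for {
		if p.b.Len() > 0 {
			return p.b.Read(d)
		}
		if p.err != nil {
			return 0, p.err
		}
		p.c.Wait()
	}
}

func (p *condPipe) Write(d []byte) (int, error) {
	p.mu.Lock()
	defer p.mu.Unlock()
	p.lazyInit()
	defer p.c.Signal() // wake a blocked reader
	if p.err != nil {
		return 0, errors.New("write on closed pipe")
	}
	return p.b.Write(d)
}

func (p *condPipe) CloseWithError(err error) {
	p.mu.Lock()
	defer p.mu.Unlock()
	p.lazyInit()
	defer p.c.Signal()
	if p.err == nil {
		p.err = err
	}
}

func main() {
	p := new(condPipe)
	go func() {
		p.Write([]byte("hello"))
		p.CloseWithError(io.EOF)
	}()
	buf := make([]byte, 8)
	n, _ := p.Read(buf)
	fmt.Printf("%s\n", buf[:n]) // hello
}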
- -// TODO: replace all <-sc.doneServing with reads from the stream's cw -// instead, and make sure that on close we close all open -// streams. then remove doneServing? - -// TODO: re-audit GOAWAY support. Consider each incoming frame type and -// whether it should be ignored during graceful shutdown. - -// TODO: disconnect idle clients. GFE seems to do 4 minutes. make -// configurable? or maximum number of idle clients and remove the -// oldest? - -// TODO: turn off the serve goroutine when idle, so -// an idle conn only has the readFrames goroutine active. (which could -// also be optimized probably to pin less memory in crypto/tls). This -// would involve tracking when the serve goroutine is active (atomic -// int32 read/CAS probably?) and starting it up when frames arrive, -// and shutting it down when all handlers exit. the occasional PING -// packets could use time.AfterFunc to call sc.wakeStartServeLoop() -// (which is a no-op if already running) and then queue the PING write -// as normal. The serve loop would then exit in most cases (if no -// Handlers running) and not be woken up again until the PING packet -// returns. - -// TODO (maybe): add a mechanism for Handlers to going into -// half-closed-local mode (rw.(io.Closer) test?) but not exit their -// handler, and continue to be able to read from the -// Request.Body. This would be a somewhat semantic change from HTTP/1 -// (or at least what we expose in net/http), so I'd probably want to -// add it there too. For now, this package says that returning from -// the Handler ServeHTTP function means you're both done reading and -// done writing, without a way to stop just one or the other. - -package http2 - -import ( - "bufio" - "bytes" - "crypto/tls" - "errors" - "fmt" - "io" - "log" - "net" - "net/http" - "net/textproto" - "net/url" - "os" - "reflect" - "runtime" - "strconv" - "strings" - "sync" - "time" - - "golang.org/x/net/http2/hpack" -) - -const ( - prefaceTimeout = 10 * time.Second - firstSettingsTimeout = 2 * time.Second // should be in-flight with preface anyway - handlerChunkWriteSize = 4 << 10 - defaultMaxStreams = 250 // TODO: make this 100 as the GFE seems to? -) - -var ( - errClientDisconnected = errors.New("client disconnected") - errClosedBody = errors.New("body closed by handler") - errHandlerComplete = errors.New("http2: request body closed due to handler exiting") - errStreamClosed = errors.New("http2: stream closed") -) - -var responseWriterStatePool = sync.Pool{ - New: func() interface{} { - rws := &responseWriterState{} - rws.bw = bufio.NewWriterSize(chunkWriter{rws}, handlerChunkWriteSize) - return rws - }, -} - -// Test hooks. -var ( - testHookOnConn func() - testHookGetServerConn func(*serverConn) - testHookOnPanicMu *sync.Mutex // nil except in tests - testHookOnPanic func(sc *serverConn, panicVal interface{}) (rePanic bool) -) - -// Server is an HTTP/2 server. -type Server struct { - // MaxHandlers limits the number of http.Handler ServeHTTP goroutines - // which may run at a time over all connections. - // Negative or zero no limit. - // TODO: implement - MaxHandlers int - - // MaxConcurrentStreams optionally specifies the number of - // concurrent streams that each client may have open at a - // time. This is unrelated to the number of http.Handler goroutines - // which may be active globally, which is MaxHandlers. - // If zero, MaxConcurrentStreams defaults to at least 100, per - // the HTTP/2 spec's recommendations. 
- MaxConcurrentStreams uint32 - - // MaxReadFrameSize optionally specifies the largest frame - // this server is willing to read. A valid value is between - // 16k and 16M, inclusive. If zero or otherwise invalid, a - // default value is used. - MaxReadFrameSize uint32 - - // PermitProhibitedCipherSuites, if true, permits the use of - // cipher suites prohibited by the HTTP/2 spec. - PermitProhibitedCipherSuites bool -} - -func (s *Server) maxReadFrameSize() uint32 { - if v := s.MaxReadFrameSize; v >= minMaxFrameSize && v <= maxFrameSize { - return v - } - return defaultMaxReadFrameSize -} - -func (s *Server) maxConcurrentStreams() uint32 { - if v := s.MaxConcurrentStreams; v > 0 { - return v - } - return defaultMaxStreams -} - -// ConfigureServer adds HTTP/2 support to a net/http Server. -// -// The configuration conf may be nil. -// -// ConfigureServer must be called before s begins serving. -func ConfigureServer(s *http.Server, conf *Server) error { - if conf == nil { - conf = new(Server) - } - - if s.TLSConfig == nil { - s.TLSConfig = new(tls.Config) - } else if s.TLSConfig.CipherSuites != nil { - // If they already provided a CipherSuite list, return - // an error if it has a bad order or is missing - // ECDHE_RSA_WITH_AES_128_GCM_SHA256. - const requiredCipher = tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 - haveRequired := false - sawBad := false - for i, cs := range s.TLSConfig.CipherSuites { - if cs == requiredCipher { - haveRequired = true - } - if isBadCipher(cs) { - sawBad = true - } else if sawBad { - return fmt.Errorf("http2: TLSConfig.CipherSuites index %d contains an HTTP/2-approved cipher suite (%#04x), but it comes after unapproved cipher suites. With this configuration, clients that don't support previous, approved cipher suites may be given an unapproved one and reject the connection.", i, cs) - } - } - if !haveRequired { - return fmt.Errorf("http2: TLSConfig.CipherSuites is missing HTTP/2-required TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256") - } - } - - // Note: not setting MinVersion to tls.VersionTLS12, - // as we don't want to interfere with HTTP/1.1 traffic - // on the user's server. We enforce TLS 1.2 later once - // we accept a connection. Ideally this should be done - // during next-proto selection, but using TLS <1.2 with - // HTTP/2 is still the client's bug. - - s.TLSConfig.PreferServerCipherSuites = true - - haveNPN := false - for _, p := range s.TLSConfig.NextProtos { - if p == NextProtoTLS { - haveNPN = true - break - } - } - if !haveNPN { - s.TLSConfig.NextProtos = append(s.TLSConfig.NextProtos, NextProtoTLS) - } - // h2-14 is temporary (as of 2015-03-05) while we wait for all browsers - // to switch to "h2". - s.TLSConfig.NextProtos = append(s.TLSConfig.NextProtos, "h2-14") - - if s.TLSNextProto == nil { - s.TLSNextProto = map[string]func(*http.Server, *tls.Conn, http.Handler){} - } - protoHandler := func(hs *http.Server, c *tls.Conn, h http.Handler) { - if testHookOnConn != nil { - testHookOnConn() - } - conf.ServeConn(c, &ServeConnOpts{ - Handler: h, - BaseConfig: hs, - }) - } - s.TLSNextProto[NextProtoTLS] = protoHandler - s.TLSNextProto["h2-14"] = protoHandler // temporary; see above. - return nil -} - -// ServeConnOpts are options for the Server.ServeConn method. -type ServeConnOpts struct { - // BaseConfig optionally sets the base configuration - // for values. If nil, defaults are used. - BaseConfig *http.Server - - // Handler specifies which handler to use for processing - // requests. If nil, BaseConfig.Handler is used. 
If BaseConfig - // or BaseConfig.Handler is nil, http.DefaultServeMux is used. - Handler http.Handler -} - -func (o *ServeConnOpts) baseConfig() *http.Server { - if o != nil && o.BaseConfig != nil { - return o.BaseConfig - } - return new(http.Server) -} - -func (o *ServeConnOpts) handler() http.Handler { - if o != nil { - if o.Handler != nil { - return o.Handler - } - if o.BaseConfig != nil && o.BaseConfig.Handler != nil { - return o.BaseConfig.Handler - } - } - return http.DefaultServeMux -} - -// ServeConn serves HTTP/2 requests on the provided connection and -// blocks until the connection is no longer readable. -// -// ServeConn starts speaking HTTP/2 assuming that c has not had any -// reads or writes. It writes its initial settings frame and expects -// to be able to read the preface and settings frame from the -// client. If c has a ConnectionState method like a *tls.Conn, the -// ConnectionState is used to verify the TLS ciphersuite and to set -// the Request.TLS field in Handlers. -// -// ServeConn does not support h2c by itself. Any h2c support must be -// implemented in terms of providing a suitably-behaving net.Conn. -// -// The opts parameter is optional. If nil, default values are used. -func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { - baseCtx, cancel := serverConnBaseContext(c, opts) - defer cancel() - - sc := &serverConn{ - srv: s, - hs: opts.baseConfig(), - conn: c, - baseCtx: baseCtx, - remoteAddrStr: c.RemoteAddr().String(), - bw: newBufferedWriter(c), - handler: opts.handler(), - streams: make(map[uint32]*stream), - readFrameCh: make(chan readFrameResult), - wantWriteFrameCh: make(chan frameWriteMsg, 8), - wroteFrameCh: make(chan frameWriteResult, 1), // buffered; one send in writeFrameAsync - bodyReadCh: make(chan bodyReadMsg), // buffering doesn't matter either way - doneServing: make(chan struct{}), - advMaxStreams: s.maxConcurrentStreams(), - writeSched: writeScheduler{ - maxFrameSize: initialMaxFrameSize, - }, - initialWindowSize: initialWindowSize, - headerTableSize: initialHeaderTableSize, - serveG: newGoroutineLock(), - pushEnabled: true, - } - - sc.flow.add(initialWindowSize) - sc.inflow.add(initialWindowSize) - sc.hpackEncoder = hpack.NewEncoder(&sc.headerWriteBuf) - - fr := NewFramer(sc.bw, c) - fr.ReadMetaHeaders = hpack.NewDecoder(initialHeaderTableSize, nil) - fr.MaxHeaderListSize = sc.maxHeaderListSize() - fr.SetMaxReadFrameSize(s.maxReadFrameSize()) - sc.framer = fr - - if tc, ok := c.(connectionStater); ok { - sc.tlsState = new(tls.ConnectionState) - *sc.tlsState = tc.ConnectionState() - // 9.2 Use of TLS Features - // An implementation of HTTP/2 over TLS MUST use TLS - // 1.2 or higher with the restrictions on feature set - // and cipher suite described in this section. Due to - // implementation limitations, it might not be - // possible to fail TLS negotiation. An endpoint MUST - // immediately terminate an HTTP/2 connection that - // does not meet the TLS requirements described in - // this section with a connection error (Section - // 5.4.1) of type INADEQUATE_SECURITY. - if sc.tlsState.Version < tls.VersionTLS12 { - sc.rejectConn(ErrCodeInadequateSecurity, "TLS version too low") - return - } - - if sc.tlsState.ServerName == "" { - // Client must use SNI, but we don't enforce that anymore, - // since it was causing problems when connecting to bare IP - // addresses during development. - // - // TODO: optionally enforce? Or enforce at the time we receive - // a new request, and verify the the ServerName matches the :authority? 
- // But that precludes proxy situations, perhaps. - // - // So for now, do nothing here again. - } - - if !s.PermitProhibitedCipherSuites && isBadCipher(sc.tlsState.CipherSuite) { - // "Endpoints MAY choose to generate a connection error - // (Section 5.4.1) of type INADEQUATE_SECURITY if one of - // the prohibited cipher suites are negotiated." - // - // We choose that. In my opinion, the spec is weak - // here. It also says both parties must support at least - // TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 so there's no - // excuses here. If we really must, we could allow an - // "AllowInsecureWeakCiphers" option on the server later. - // Let's see how it plays out first. - sc.rejectConn(ErrCodeInadequateSecurity, fmt.Sprintf("Prohibited TLS 1.2 Cipher Suite: %x", sc.tlsState.CipherSuite)) - return - } - } - - if hook := testHookGetServerConn; hook != nil { - hook(sc) - } - sc.serve() -} - -func (sc *serverConn) rejectConn(err ErrCode, debug string) { - sc.vlogf("http2: server rejecting conn: %v, %s", err, debug) - // ignoring errors. hanging up anyway. - sc.framer.WriteGoAway(0, err, []byte(debug)) - sc.bw.Flush() - sc.conn.Close() -} - -type serverConn struct { - // Immutable: - srv *Server - hs *http.Server - conn net.Conn - bw *bufferedWriter // writing to conn - handler http.Handler - baseCtx contextContext - framer *Framer - doneServing chan struct{} // closed when serverConn.serve ends - readFrameCh chan readFrameResult // written by serverConn.readFrames - wantWriteFrameCh chan frameWriteMsg // from handlers -> serve - wroteFrameCh chan frameWriteResult // from writeFrameAsync -> serve, tickles more frame writes - bodyReadCh chan bodyReadMsg // from handlers -> serve - testHookCh chan func(int) // code to run on the serve loop - flow flow // conn-wide (not stream-specific) outbound flow control - inflow flow // conn-wide inbound flow control - tlsState *tls.ConnectionState // shared by all handlers, like net/http - remoteAddrStr string - - // Everything following is owned by the serve loop; use serveG.check(): - serveG goroutineLock // used to verify funcs are on serve() - pushEnabled bool - sawFirstSettings bool // got the initial SETTINGS frame after the preface - needToSendSettingsAck bool - unackedSettings int // how many SETTINGS have we sent without ACKs? 
- clientMaxStreams uint32 // SETTINGS_MAX_CONCURRENT_STREAMS from client (our PUSH_PROMISE limit) - advMaxStreams uint32 // our SETTINGS_MAX_CONCURRENT_STREAMS advertised the client - curOpenStreams uint32 // client's number of open streams - maxStreamID uint32 // max ever seen - streams map[uint32]*stream - initialWindowSize int32 - headerTableSize uint32 - peerMaxHeaderListSize uint32 // zero means unknown (default) - canonHeader map[string]string // http2-lower-case -> Go-Canonical-Case - writingFrame bool // started write goroutine but haven't heard back on wroteFrameCh - needsFrameFlush bool // last frame write wasn't a flush - writeSched writeScheduler - inGoAway bool // we've started to or sent GOAWAY - needToSendGoAway bool // we need to schedule a GOAWAY frame write - goAwayCode ErrCode - shutdownTimerCh <-chan time.Time // nil until used - shutdownTimer *time.Timer // nil until used - freeRequestBodyBuf []byte // if non-nil, a free initialWindowSize buffer for getRequestBodyBuf - - // Owned by the writeFrameAsync goroutine: - headerWriteBuf bytes.Buffer - hpackEncoder *hpack.Encoder -} - -func (sc *serverConn) maxHeaderListSize() uint32 { - n := sc.hs.MaxHeaderBytes - if n <= 0 { - n = http.DefaultMaxHeaderBytes - } - // http2's count is in a slightly different unit and includes 32 bytes per pair. - // So, take the net/http.Server value and pad it up a bit, assuming 10 headers. - const perFieldOverhead = 32 // per http2 spec - const typicalHeaders = 10 // conservative - return uint32(n + typicalHeaders*perFieldOverhead) -} - -// stream represents a stream. This is the minimal metadata needed by -// the serve goroutine. Most of the actual stream state is owned by -// the http.Handler's goroutine in the responseWriter. Because the -// responseWriter's responseWriterState is recycled at the end of a -// handler, this struct intentionally has no pointer to the -// *responseWriter{,State} itself, as the Handler ending nils out the -// responseWriter's state field. 
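The maxHeaderListSize helper above pads net/http's MaxHeaderBytes by 32 octets per field because HTTP/2's SETTINGS_MAX_HEADER_LIST_SIZE counts each field as name length plus value length plus a 32-octet overhead. A small sketch of that accounting rule; headerListSize is an illustrative helper, not taken from the vendored code.

package main

import (
	"fmt"
	"net/http"
)

// headerListSize applies the HTTP/2 accounting rule referenced above:
// each field counts as len(name) + len(value) + 32 octets.
func headerListSize(h http.Header) uint32 {
	var n uint32
	for k, vv := range h {
		for _, v := range vv {
			n += uint32(len(k)) + uint32(len(v)) + 32
		}
	}
	return n
}

func main() {
	h := http.Header{"Content-Type": {"text/plain"}}
	fmt.Println(headerListSize(h)) // 12 + 10 + 32 = 54
}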
-type stream struct { - // immutable: - sc *serverConn - id uint32 - body *pipe // non-nil if expecting DATA frames - cw closeWaiter // closed wait stream transitions to closed state - ctx contextContext - cancelCtx func() - - // owned by serverConn's serve loop: - bodyBytes int64 // body bytes seen so far - declBodyBytes int64 // or -1 if undeclared - flow flow // limits writing from Handler to client - inflow flow // what the client is allowed to POST/etc to us - parent *stream // or nil - numTrailerValues int64 - weight uint8 - state streamState - sentReset bool // only true once detached from streams map - gotReset bool // only true once detacted from streams map - gotTrailerHeader bool // HEADER frame for trailers was seen - wroteHeaders bool // whether we wrote headers (not status 100) - reqBuf []byte - - trailer http.Header // accumulated trailers - reqTrailer http.Header // handler's Request.Trailer -} - -func (sc *serverConn) Framer() *Framer { return sc.framer } -func (sc *serverConn) CloseConn() error { return sc.conn.Close() } -func (sc *serverConn) Flush() error { return sc.bw.Flush() } -func (sc *serverConn) HeaderEncoder() (*hpack.Encoder, *bytes.Buffer) { - return sc.hpackEncoder, &sc.headerWriteBuf -} - -func (sc *serverConn) state(streamID uint32) (streamState, *stream) { - sc.serveG.check() - // http://http2.github.io/http2-spec/#rfc.section.5.1 - if st, ok := sc.streams[streamID]; ok { - return st.state, st - } - // "The first use of a new stream identifier implicitly closes all - // streams in the "idle" state that might have been initiated by - // that peer with a lower-valued stream identifier. For example, if - // a client sends a HEADERS frame on stream 7 without ever sending a - // frame on stream 5, then stream 5 transitions to the "closed" - // state when the first frame for stream 7 is sent or received." - if streamID <= sc.maxStreamID { - return stateClosed, nil - } - return stateIdle, nil -} - -// setConnState calls the net/http ConnState hook for this connection, if configured. -// Note that the net/http package does StateNew and StateClosed for us. -// There is currently no plan for StateHijacked or hijacking HTTP/2 connections. -func (sc *serverConn) setConnState(state http.ConnState) { - if sc.hs.ConnState != nil { - sc.hs.ConnState(sc.conn, state) - } -} - -func (sc *serverConn) vlogf(format string, args ...interface{}) { - if VerboseLogs { - sc.logf(format, args...) - } -} - -func (sc *serverConn) logf(format string, args ...interface{}) { - if lg := sc.hs.ErrorLog; lg != nil { - lg.Printf(format, args...) - } else { - log.Printf(format, args...) - } -} - -// errno returns v's underlying uintptr, else 0. -// -// TODO: remove this helper function once http2 can use build -// tags. See comment in isClosedConnError. -func errno(v error) uintptr { - if rv := reflect.ValueOf(v); rv.Kind() == reflect.Uintptr { - return uintptr(rv.Uint()) - } - return 0 -} - -// isClosedConnError reports whether err is an error from use of a closed -// network connection. -func isClosedConnError(err error) bool { - if err == nil { - return false - } - - // TODO: remove this string search and be more like the Windows - // case below. That might involve modifying the standard library - // to return better error types. - str := err.Error() - if strings.Contains(str, "use of closed network connection") { - return true - } - - // TODO(bradfitz): x/tools/cmd/bundle doesn't really support - // build tags, so I can't make an http2_windows.go file with - // Windows-specific stuff. 
Fix that and move this, once we - // have a way to bundle this into std's net/http somehow. - if runtime.GOOS == "windows" { - if oe, ok := err.(*net.OpError); ok && oe.Op == "read" { - if se, ok := oe.Err.(*os.SyscallError); ok && se.Syscall == "wsarecv" { - const WSAECONNABORTED = 10053 - const WSAECONNRESET = 10054 - if n := errno(se.Err); n == WSAECONNRESET || n == WSAECONNABORTED { - return true - } - } - } - } - return false -} - -func (sc *serverConn) condlogf(err error, format string, args ...interface{}) { - if err == nil { - return - } - if err == io.EOF || err == io.ErrUnexpectedEOF || isClosedConnError(err) { - // Boring, expected errors. - sc.vlogf(format, args...) - } else { - sc.logf(format, args...) - } -} - -func (sc *serverConn) canonicalHeader(v string) string { - sc.serveG.check() - cv, ok := commonCanonHeader[v] - if ok { - return cv - } - cv, ok = sc.canonHeader[v] - if ok { - return cv - } - if sc.canonHeader == nil { - sc.canonHeader = make(map[string]string) - } - cv = http.CanonicalHeaderKey(v) - sc.canonHeader[v] = cv - return cv -} - -type readFrameResult struct { - f Frame // valid until readMore is called - err error - - // readMore should be called once the consumer no longer needs or - // retains f. After readMore, f is invalid and more frames can be - // read. - readMore func() -} - -// readFrames is the loop that reads incoming frames. -// It takes care to only read one frame at a time, blocking until the -// consumer is done with the frame. -// It's run on its own goroutine. -func (sc *serverConn) readFrames() { - gate := make(gate) - gateDone := gate.Done - for { - f, err := sc.framer.ReadFrame() - select { - case sc.readFrameCh <- readFrameResult{f, err, gateDone}: - case <-sc.doneServing: - return - } - select { - case <-gate: - case <-sc.doneServing: - return - } - if terminalReadFrameError(err) { - return - } - } -} - -// frameWriteResult is the message passed from writeFrameAsync to the serve goroutine. -type frameWriteResult struct { - wm frameWriteMsg // what was written (or attempted) - err error // result of the writeFrame call -} - -// writeFrameAsync runs in its own goroutine and writes a single frame -// and then reports when it's done. -// At most one goroutine can be running writeFrameAsync at a time per -// serverConn. -func (sc *serverConn) writeFrameAsync(wm frameWriteMsg) { - err := wm.write.writeFrame(sc) - sc.wroteFrameCh <- frameWriteResult{wm, err} -} - -func (sc *serverConn) closeAllStreamsOnConnClose() { - sc.serveG.check() - for _, st := range sc.streams { - sc.closeStream(st, errClientDisconnected) - } -} - -func (sc *serverConn) stopShutdownTimer() { - sc.serveG.check() - if t := sc.shutdownTimer; t != nil { - t.Stop() - } -} - -func (sc *serverConn) notePanic() { - // Note: this is for serverConn.serve panicking, not http.Handler code. 
- if testHookOnPanicMu != nil { - testHookOnPanicMu.Lock() - defer testHookOnPanicMu.Unlock() - } - if testHookOnPanic != nil { - if e := recover(); e != nil { - if testHookOnPanic(sc, e) { - panic(e) - } - } - } -} - -func (sc *serverConn) serve() { - sc.serveG.check() - defer sc.notePanic() - defer sc.conn.Close() - defer sc.closeAllStreamsOnConnClose() - defer sc.stopShutdownTimer() - defer close(sc.doneServing) // unblocks handlers trying to send - - if VerboseLogs { - sc.vlogf("http2: server connection from %v on %p", sc.conn.RemoteAddr(), sc.hs) - } - - sc.writeFrame(frameWriteMsg{ - write: writeSettings{ - {SettingMaxFrameSize, sc.srv.maxReadFrameSize()}, - {SettingMaxConcurrentStreams, sc.advMaxStreams}, - {SettingMaxHeaderListSize, sc.maxHeaderListSize()}, - - // TODO: more actual settings, notably - // SettingInitialWindowSize, but then we also - // want to bump up the conn window size the - // same amount here right after the settings - }, - }) - sc.unackedSettings++ - - if err := sc.readPreface(); err != nil { - sc.condlogf(err, "http2: server: error reading preface from client %v: %v", sc.conn.RemoteAddr(), err) - return - } - // Now that we've got the preface, get us out of the - // "StateNew" state. We can't go directly to idle, though. - // Active means we read some data and anticipate a request. We'll - // do another Active when we get a HEADERS frame. - sc.setConnState(http.StateActive) - sc.setConnState(http.StateIdle) - - go sc.readFrames() // closed by defer sc.conn.Close above - - settingsTimer := time.NewTimer(firstSettingsTimeout) - loopNum := 0 - for { - loopNum++ - select { - case wm := <-sc.wantWriteFrameCh: - sc.writeFrame(wm) - case res := <-sc.wroteFrameCh: - sc.wroteFrame(res) - case res := <-sc.readFrameCh: - if !sc.processFrameFromReader(res) { - return - } - res.readMore() - if settingsTimer.C != nil { - settingsTimer.Stop() - settingsTimer.C = nil - } - case m := <-sc.bodyReadCh: - sc.noteBodyRead(m.st, m.n) - case <-settingsTimer.C: - sc.logf("timeout waiting for SETTINGS frames from %v", sc.conn.RemoteAddr()) - return - case <-sc.shutdownTimerCh: - sc.vlogf("GOAWAY close timer fired; closing conn from %v", sc.conn.RemoteAddr()) - return - case fn := <-sc.testHookCh: - fn(loopNum) - } - } -} - -// readPreface reads the ClientPreface greeting from the peer -// or returns an error on timeout or an invalid greeting. -func (sc *serverConn) readPreface() error { - errc := make(chan error, 1) - go func() { - // Read the client preface - buf := make([]byte, len(ClientPreface)) - if _, err := io.ReadFull(sc.conn, buf); err != nil { - errc <- err - } else if !bytes.Equal(buf, clientPreface) { - errc <- fmt.Errorf("bogus greeting %q", buf) - } else { - errc <- nil - } - }() - timer := time.NewTimer(prefaceTimeout) // TODO: configurable on *Server? - defer timer.Stop() - select { - case <-timer.C: - return errors.New("timeout waiting for client preface") - case err := <-errc: - if err == nil { - if VerboseLogs { - sc.vlogf("http2: server: client %v said hello", sc.conn.RemoteAddr()) - } - } - return err - } -} - -var errChanPool = sync.Pool{ - New: func() interface{} { return make(chan error, 1) }, -} - -var writeDataPool = sync.Pool{ - New: func() interface{} { return new(writeData) }, -} - -// writeDataFromHandler writes DATA response frames from a handler on -// the given stream. 
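readPreface above follows a common Go idiom: run the blocking read in its own goroutine and select between its result channel and a timer. A generic sketch of that idiom over a plain io.Reader; readFullTimeout is a hypothetical name, not an API from the deleted file.

package main

import (
	"errors"
	"fmt"
	"io"
	"strings"
	"time"
)

// readFullTimeout mirrors the readPreface pattern above: perform a
// blocking io.ReadFull in its own goroutine and race it against a timer.
func readFullTimeout(r io.Reader, n int, d time.Duration) ([]byte, error) {
	errc := make(chan error, 1)
	buf := make([]byte, n)
	go func() {
		_, err := io.ReadFull(r, buf)
		errc <- err
	}()
	timer := time.NewTimer(d)
	defer timer.Stop()
	select {
	case <-timer.C:
		return nil, errors.New("timeout waiting for data")
	case err := <-errc:
		if err != nil {
			return nil, err
		}
		return buf, nil
	}
}

func main() {
	b, err := readFullTimeout(strings.NewReader("PRI * HTTP/2.0"), 3, time.Second)
	fmt.Println(string(b), err) // PRI <nil>
}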
-func (sc *serverConn) writeDataFromHandler(stream *stream, data []byte, endStream bool) error { - ch := errChanPool.Get().(chan error) - writeArg := writeDataPool.Get().(*writeData) - *writeArg = writeData{stream.id, data, endStream} - err := sc.writeFrameFromHandler(frameWriteMsg{ - write: writeArg, - stream: stream, - done: ch, - }) - if err != nil { - return err - } - var frameWriteDone bool // the frame write is done (successfully or not) - select { - case err = <-ch: - frameWriteDone = true - case <-sc.doneServing: - return errClientDisconnected - case <-stream.cw: - // If both ch and stream.cw were ready (as might - // happen on the final Write after an http.Handler - // ends), prefer the write result. Otherwise this - // might just be us successfully closing the stream. - // The writeFrameAsync and serve goroutines guarantee - // that the ch send will happen before the stream.cw - // close. - select { - case err = <-ch: - frameWriteDone = true - default: - return errStreamClosed - } - } - errChanPool.Put(ch) - if frameWriteDone { - writeDataPool.Put(writeArg) - } - return err -} - -// writeFrameFromHandler sends wm to sc.wantWriteFrameCh, but aborts -// if the connection has gone away. -// -// This must not be run from the serve goroutine itself, else it might -// deadlock writing to sc.wantWriteFrameCh (which is only mildly -// buffered and is read by serve itself). If you're on the serve -// goroutine, call writeFrame instead. -func (sc *serverConn) writeFrameFromHandler(wm frameWriteMsg) error { - sc.serveG.checkNotOn() // NOT - select { - case sc.wantWriteFrameCh <- wm: - return nil - case <-sc.doneServing: - // Serve loop is gone. - // Client has closed their connection to the server. - return errClientDisconnected - } -} - -// writeFrame schedules a frame to write and sends it if there's nothing -// already being written. -// -// There is no pushback here (the serve goroutine never blocks). It's -// the http.Handlers that block, waiting for their previous frames to -// make it onto the wire -// -// If you're not on the serve goroutine, use writeFrameFromHandler instead. -func (sc *serverConn) writeFrame(wm frameWriteMsg) { - sc.serveG.check() - - var ignoreWrite bool - - // Don't send a 100-continue response if we've already sent headers. - // See golang.org/issue/14030. - switch wm.write.(type) { - case *writeResHeaders: - wm.stream.wroteHeaders = true - case write100ContinueHeadersFrame: - if wm.stream.wroteHeaders { - ignoreWrite = true - } - } - - if !ignoreWrite { - sc.writeSched.add(wm) - } - sc.scheduleFrameWrite() -} - -// startFrameWrite starts a goroutine to write wm (in a separate -// goroutine since that might block on the network), and updates the -// serve goroutine's state about the world, updated from info in wm. -func (sc *serverConn) startFrameWrite(wm frameWriteMsg) { - sc.serveG.check() - if sc.writingFrame { - panic("internal error: can only be writing one frame at a time") - } - - st := wm.stream - if st != nil { - switch st.state { - case stateHalfClosedLocal: - panic("internal error: attempt to send frame on half-closed-local stream") - case stateClosed: - if st.sentReset || st.gotReset { - // Skip this frame. 
- sc.scheduleFrameWrite() - return - } - panic(fmt.Sprintf("internal error: attempt to send a write %v on a closed stream", wm)) - } - } - - sc.writingFrame = true - sc.needsFrameFlush = true - go sc.writeFrameAsync(wm) -} - -// errHandlerPanicked is the error given to any callers blocked in a read from -// Request.Body when the main goroutine panics. Since most handlers read in the -// the main ServeHTTP goroutine, this will show up rarely. -var errHandlerPanicked = errors.New("http2: handler panicked") - -// wroteFrame is called on the serve goroutine with the result of -// whatever happened on writeFrameAsync. -func (sc *serverConn) wroteFrame(res frameWriteResult) { - sc.serveG.check() - if !sc.writingFrame { - panic("internal error: expected to be already writing a frame") - } - sc.writingFrame = false - - wm := res.wm - st := wm.stream - - closeStream := endsStream(wm.write) - - if _, ok := wm.write.(handlerPanicRST); ok { - sc.closeStream(st, errHandlerPanicked) - } - - // Reply (if requested) to the blocked ServeHTTP goroutine. - if ch := wm.done; ch != nil { - select { - case ch <- res.err: - default: - panic(fmt.Sprintf("unbuffered done channel passed in for type %T", wm.write)) - } - } - wm.write = nil // prevent use (assume it's tainted after wm.done send) - - if closeStream { - if st == nil { - panic("internal error: expecting non-nil stream") - } - switch st.state { - case stateOpen: - // Here we would go to stateHalfClosedLocal in - // theory, but since our handler is done and - // the net/http package provides no mechanism - // for finishing writing to a ResponseWriter - // while still reading data (see possible TODO - // at top of this file), we go into closed - // state here anyway, after telling the peer - // we're hanging up on them. - st.state = stateHalfClosedLocal // won't last long, but necessary for closeStream via resetStream - errCancel := streamError(st.id, ErrCodeCancel) - sc.resetStream(errCancel) - case stateHalfClosedRemote: - sc.closeStream(st, errHandlerComplete) - } - } - - sc.scheduleFrameWrite() -} - -// scheduleFrameWrite tickles the frame writing scheduler. -// -// If a frame is already being written, nothing happens. This will be called again -// when the frame is done being written. -// -// If a frame isn't being written we need to send one, the best frame -// to send is selected, preferring first things that aren't -// stream-specific (e.g. ACKing settings), and then finding the -// highest priority stream. -// -// If a frame isn't being written and there's nothing else to send, we -// flush the write buffer. 
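writeFrameFromHandler above shows how handler goroutines hand frames to the serve loop without risking a send to a dead connection: select between the work channel and the doneServing channel. A stripped-down sketch of that hand-off; sendOrAbort and errOwnerGone are illustrative names only.

package main

import (
	"errors"
	"fmt"
)

var errOwnerGone = errors.New("owner goroutine is gone")

// sendOrAbort hands a unit of work to an owner goroutine's channel, but
// gives up cleanly if the owner has already shut down (signalled by
// closing done), echoing the select in writeFrameFromHandler above.
func sendOrAbort(work chan<- string, done <-chan struct{}, msg string) error {
	select {
	case work <- msg:
		return nil
	case <-done:
		return errOwnerGone
	}
}

func main() {
	work := make(chan string, 1)
	done := make(chan struct{})
	fmt.Println(sendOrAbort(work, done, "frame")) // <nil>
	close(done)
	// The work channel is full now, so only the done case can fire.
	fmt.Println(sendOrAbort(work, done, "frame")) // owner goroutine is gone
}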
-func (sc *serverConn) scheduleFrameWrite() { - sc.serveG.check() - if sc.writingFrame { - return - } - if sc.needToSendGoAway { - sc.needToSendGoAway = false - sc.startFrameWrite(frameWriteMsg{ - write: &writeGoAway{ - maxStreamID: sc.maxStreamID, - code: sc.goAwayCode, - }, - }) - return - } - if sc.needToSendSettingsAck { - sc.needToSendSettingsAck = false - sc.startFrameWrite(frameWriteMsg{write: writeSettingsAck{}}) - return - } - if !sc.inGoAway { - if wm, ok := sc.writeSched.take(); ok { - sc.startFrameWrite(wm) - return - } - } - if sc.needsFrameFlush { - sc.startFrameWrite(frameWriteMsg{write: flushFrameWriter{}}) - sc.needsFrameFlush = false // after startFrameWrite, since it sets this true - return - } -} - -func (sc *serverConn) goAway(code ErrCode) { - sc.serveG.check() - if sc.inGoAway { - return - } - if code != ErrCodeNo { - sc.shutDownIn(250 * time.Millisecond) - } else { - // TODO: configurable - sc.shutDownIn(1 * time.Second) - } - sc.inGoAway = true - sc.needToSendGoAway = true - sc.goAwayCode = code - sc.scheduleFrameWrite() -} - -func (sc *serverConn) shutDownIn(d time.Duration) { - sc.serveG.check() - sc.shutdownTimer = time.NewTimer(d) - sc.shutdownTimerCh = sc.shutdownTimer.C -} - -func (sc *serverConn) resetStream(se StreamError) { - sc.serveG.check() - sc.writeFrame(frameWriteMsg{write: se}) - if st, ok := sc.streams[se.StreamID]; ok { - st.sentReset = true - sc.closeStream(st, se) - } -} - -// processFrameFromReader processes the serve loop's read from readFrameCh from the -// frame-reading goroutine. -// processFrameFromReader returns whether the connection should be kept open. -func (sc *serverConn) processFrameFromReader(res readFrameResult) bool { - sc.serveG.check() - err := res.err - if err != nil { - if err == ErrFrameTooLarge { - sc.goAway(ErrCodeFrameSize) - return true // goAway will close the loop - } - clientGone := err == io.EOF || err == io.ErrUnexpectedEOF || isClosedConnError(err) - if clientGone { - // TODO: could we also get into this state if - // the peer does a half close - // (e.g. CloseWrite) because they're done - // sending frames but they're still wanting - // our open replies? Investigate. - // TODO: add CloseWrite to crypto/tls.Conn first - // so we have a way to test this? I suppose - // just for testing we could have a non-TLS mode. - return false - } - } else { - f := res.f - if VerboseLogs { - sc.vlogf("http2: server read frame %v", summarizeFrame(f)) - } - err = sc.processFrame(f) - if err == nil { - return true - } - } - - switch ev := err.(type) { - case StreamError: - sc.resetStream(ev) - return true - case goAwayFlowError: - sc.goAway(ErrCodeFlowControl) - return true - case ConnectionError: - sc.logf("http2: server connection error from %v: %v", sc.conn.RemoteAddr(), ev) - sc.goAway(ErrCode(ev)) - return true // goAway will handle shutdown - default: - if res.err != nil { - sc.vlogf("http2: server closing client connection; error reading frame from client %s: %v", sc.conn.RemoteAddr(), err) - } else { - sc.logf("http2: server closing client connection: %v", err) - } - return false - } -} - -func (sc *serverConn) processFrame(f Frame) error { - sc.serveG.check() - - // First frame received must be SETTINGS. 
- if !sc.sawFirstSettings { - if _, ok := f.(*SettingsFrame); !ok { - return ConnectionError(ErrCodeProtocol) - } - sc.sawFirstSettings = true - } - - switch f := f.(type) { - case *SettingsFrame: - return sc.processSettings(f) - case *MetaHeadersFrame: - return sc.processHeaders(f) - case *WindowUpdateFrame: - return sc.processWindowUpdate(f) - case *PingFrame: - return sc.processPing(f) - case *DataFrame: - return sc.processData(f) - case *RSTStreamFrame: - return sc.processResetStream(f) - case *PriorityFrame: - return sc.processPriority(f) - case *PushPromiseFrame: - // A client cannot push. Thus, servers MUST treat the receipt of a PUSH_PROMISE - // frame as a connection error (Section 5.4.1) of type PROTOCOL_ERROR. - return ConnectionError(ErrCodeProtocol) - default: - sc.vlogf("http2: server ignoring frame: %v", f.Header()) - return nil - } -} - -func (sc *serverConn) processPing(f *PingFrame) error { - sc.serveG.check() - if f.IsAck() { - // 6.7 PING: " An endpoint MUST NOT respond to PING frames - // containing this flag." - return nil - } - if f.StreamID != 0 { - // "PING frames are not associated with any individual - // stream. If a PING frame is received with a stream - // identifier field value other than 0x0, the recipient MUST - // respond with a connection error (Section 5.4.1) of type - // PROTOCOL_ERROR." - return ConnectionError(ErrCodeProtocol) - } - sc.writeFrame(frameWriteMsg{write: writePingAck{f}}) - return nil -} - -func (sc *serverConn) processWindowUpdate(f *WindowUpdateFrame) error { - sc.serveG.check() - switch { - case f.StreamID != 0: // stream-level flow control - st := sc.streams[f.StreamID] - if st == nil { - // "WINDOW_UPDATE can be sent by a peer that has sent a - // frame bearing the END_STREAM flag. This means that a - // receiver could receive a WINDOW_UPDATE frame on a "half - // closed (remote)" or "closed" stream. A receiver MUST - // NOT treat this as an error, see Section 5.1." - return nil - } - if !st.flow.add(int32(f.Increment)) { - return streamError(f.StreamID, ErrCodeFlowControl) - } - default: // connection-level flow control - if !sc.flow.add(int32(f.Increment)) { - return goAwayFlowError{} - } - } - sc.scheduleFrameWrite() - return nil -} - -func (sc *serverConn) processResetStream(f *RSTStreamFrame) error { - sc.serveG.check() - - state, st := sc.state(f.StreamID) - if state == stateIdle { - // 6.4 "RST_STREAM frames MUST NOT be sent for a - // stream in the "idle" state. If a RST_STREAM frame - // identifying an idle stream is received, the - // recipient MUST treat this as a connection error - // (Section 5.4.1) of type PROTOCOL_ERROR. - return ConnectionError(ErrCodeProtocol) - } - if st != nil { - st.gotReset = true - st.cancelCtx() - sc.closeStream(st, streamError(f.StreamID, f.ErrCode)) - } - return nil -} - -func (sc *serverConn) closeStream(st *stream, err error) { - sc.serveG.check() - if st.state == stateIdle || st.state == stateClosed { - panic(fmt.Sprintf("invariant; can't close stream in state %v", st.state)) - } - st.state = stateClosed - sc.curOpenStreams-- - if sc.curOpenStreams == 0 { - sc.setConnState(http.StateIdle) - } - delete(sc.streams, st.id) - if p := st.body; p != nil { - // Return any buffered unread bytes worth of conn-level flow control. 
- // See golang.org/issue/16481 - sc.sendWindowUpdate(nil, p.Len()) - - p.CloseWithError(err) - } - st.cw.Close() // signals Handler's CloseNotifier, unblocks writes, etc - sc.writeSched.forgetStream(st.id) - if st.reqBuf != nil { - // Stash this request body buffer (64k) away for reuse - // by a future POST/PUT/etc. - // - // TODO(bradfitz): share on the server? sync.Pool? - // Server requires locks and might hurt contention. - // sync.Pool might work, or might be worse, depending - // on goroutine CPU migrations. (get and put on - // separate CPUs). Maybe a mix of strategies. But - // this is an easy win for now. - sc.freeRequestBodyBuf = st.reqBuf - } -} - -func (sc *serverConn) processSettings(f *SettingsFrame) error { - sc.serveG.check() - if f.IsAck() { - sc.unackedSettings-- - if sc.unackedSettings < 0 { - // Why is the peer ACKing settings we never sent? - // The spec doesn't mention this case, but - // hang up on them anyway. - return ConnectionError(ErrCodeProtocol) - } - return nil - } - if err := f.ForeachSetting(sc.processSetting); err != nil { - return err - } - sc.needToSendSettingsAck = true - sc.scheduleFrameWrite() - return nil -} - -func (sc *serverConn) processSetting(s Setting) error { - sc.serveG.check() - if err := s.Valid(); err != nil { - return err - } - if VerboseLogs { - sc.vlogf("http2: server processing setting %v", s) - } - switch s.ID { - case SettingHeaderTableSize: - sc.headerTableSize = s.Val - sc.hpackEncoder.SetMaxDynamicTableSize(s.Val) - case SettingEnablePush: - sc.pushEnabled = s.Val != 0 - case SettingMaxConcurrentStreams: - sc.clientMaxStreams = s.Val - case SettingInitialWindowSize: - return sc.processSettingInitialWindowSize(s.Val) - case SettingMaxFrameSize: - sc.writeSched.maxFrameSize = s.Val - case SettingMaxHeaderListSize: - sc.peerMaxHeaderListSize = s.Val - default: - // Unknown setting: "An endpoint that receives a SETTINGS - // frame with any unknown or unsupported identifier MUST - // ignore that setting." - if VerboseLogs { - sc.vlogf("http2: server ignoring unknown setting %v", s) - } - } - return nil -} - -func (sc *serverConn) processSettingInitialWindowSize(val uint32) error { - sc.serveG.check() - // Note: val already validated to be within range by - // processSetting's Valid call. - - // "A SETTINGS frame can alter the initial flow control window - // size for all current streams. When the value of - // SETTINGS_INITIAL_WINDOW_SIZE changes, a receiver MUST - // adjust the size of all stream flow control windows that it - // maintains by the difference between the new value and the - // old value." - old := sc.initialWindowSize - sc.initialWindowSize = int32(val) - growth := sc.initialWindowSize - old // may be negative - for _, st := range sc.streams { - if !st.flow.add(growth) { - // 6.9.2 Initial Flow Control Window Size - // "An endpoint MUST treat a change to - // SETTINGS_INITIAL_WINDOW_SIZE that causes any flow - // control window to exceed the maximum size as a - // connection error (Section 5.4.1) of type - // FLOW_CONTROL_ERROR." - return ConnectionError(ErrCodeFlowControl) - } - } - return nil -} - -func (sc *serverConn) processData(f *DataFrame) error { - sc.serveG.check() - data := f.Data() - - // "If a DATA frame is received whose stream is not in "open" - // or "half closed (local)" state, the recipient MUST respond - // with a stream error (Section 5.4.2) of type STREAM_CLOSED." 
- id := f.Header().StreamID - st, ok := sc.streams[id] - if !ok || st.state != stateOpen || st.gotTrailerHeader { - // This includes sending a RST_STREAM if the stream is - // in stateHalfClosedLocal (which currently means that - // the http.Handler returned, so it's done reading & - // done writing). Try to stop the client from sending - // more DATA. - - // But still enforce their connection-level flow control, - // and return any flow control bytes since we're not going - // to consume them. - if sc.inflow.available() < int32(f.Length) { - return streamError(id, ErrCodeFlowControl) - } - // Deduct the flow control from inflow, since we're - // going to immediately add it back in - // sendWindowUpdate, which also schedules sending the - // frames. - sc.inflow.take(int32(f.Length)) - sc.sendWindowUpdate(nil, int(f.Length)) // conn-level - - return streamError(id, ErrCodeStreamClosed) - } - if st.body == nil { - panic("internal error: should have a body in this state") - } - - // Sender sending more than they'd declared? - if st.declBodyBytes != -1 && st.bodyBytes+int64(len(data)) > st.declBodyBytes { - st.body.CloseWithError(fmt.Errorf("sender tried to send more than declared Content-Length of %d bytes", st.declBodyBytes)) - return streamError(id, ErrCodeStreamClosed) - } - if f.Length > 0 { - // Check whether the client has flow control quota. - if st.inflow.available() < int32(f.Length) { - return streamError(id, ErrCodeFlowControl) - } - st.inflow.take(int32(f.Length)) - - if len(data) > 0 { - wrote, err := st.body.Write(data) - if err != nil { - return streamError(id, ErrCodeStreamClosed) - } - if wrote != len(data) { - panic("internal error: bad Writer") - } - st.bodyBytes += int64(len(data)) - } - - // Return any padded flow control now, since we won't - // refund it later on body reads. - if pad := int32(f.Length) - int32(len(data)); pad > 0 { - sc.sendWindowUpdate32(nil, pad) - sc.sendWindowUpdate32(st, pad) - } - } - if f.StreamEnded() { - st.endStream() - } - return nil -} - -// endStream closes a Request.Body's pipe. It is called when a DATA -// frame says a request body is over (or after trailers). -func (st *stream) endStream() { - sc := st.sc - sc.serveG.check() - - if st.declBodyBytes != -1 && st.declBodyBytes != st.bodyBytes { - st.body.CloseWithError(fmt.Errorf("request declared a Content-Length of %d but only wrote %d bytes", - st.declBodyBytes, st.bodyBytes)) - } else { - st.body.closeWithErrorAndCode(io.EOF, st.copyTrailersToHandlerRequest) - st.body.CloseWithError(io.EOF) - } - st.state = stateHalfClosedRemote -} - -// copyTrailersToHandlerRequest is run in the Handler's goroutine in -// its Request.Body.Read just before it gets io.EOF. -func (st *stream) copyTrailersToHandlerRequest() { - for k, vv := range st.trailer { - if _, ok := st.reqTrailer[k]; ok { - // Only copy it over it was pre-declared. - st.reqTrailer[k] = vv - } - } -} - -func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error { - sc.serveG.check() - id := f.Header().StreamID - if sc.inGoAway { - // Ignore. - return nil - } - // http://http2.github.io/http2-spec/#rfc.section.5.1.1 - // Streams initiated by a client MUST use odd-numbered stream - // identifiers. [...] An endpoint that receives an unexpected - // stream identifier MUST respond with a connection error - // (Section 5.4.1) of type PROTOCOL_ERROR. - if id%2 != 1 { - return ConnectionError(ErrCodeProtocol) - } - // A HEADERS frame can be used to create a new stream or - // send a trailer for an open one. 
If we already have a stream - // open, let it process its own HEADERS frame (trailers at this - // point, if it's valid). - st := sc.streams[f.Header().StreamID] - if st != nil { - return st.processTrailerHeaders(f) - } - - // [...] The identifier of a newly established stream MUST be - // numerically greater than all streams that the initiating - // endpoint has opened or reserved. [...] An endpoint that - // receives an unexpected stream identifier MUST respond with - // a connection error (Section 5.4.1) of type PROTOCOL_ERROR. - if id <= sc.maxStreamID { - return ConnectionError(ErrCodeProtocol) - } - sc.maxStreamID = id - - ctx, cancelCtx := contextWithCancel(sc.baseCtx) - st = &stream{ - sc: sc, - id: id, - state: stateOpen, - ctx: ctx, - cancelCtx: cancelCtx, - } - if f.StreamEnded() { - st.state = stateHalfClosedRemote - } - st.cw.Init() - - st.flow.conn = &sc.flow // link to conn-level counter - st.flow.add(sc.initialWindowSize) - st.inflow.conn = &sc.inflow // link to conn-level counter - st.inflow.add(initialWindowSize) // TODO: update this when we send a higher initial window size in the initial settings - - sc.streams[id] = st - if f.HasPriority() { - adjustStreamPriority(sc.streams, st.id, f.Priority) - } - sc.curOpenStreams++ - if sc.curOpenStreams == 1 { - sc.setConnState(http.StateActive) - } - if sc.curOpenStreams > sc.advMaxStreams { - // "Endpoints MUST NOT exceed the limit set by their - // peer. An endpoint that receives a HEADERS frame - // that causes their advertised concurrent stream - // limit to be exceeded MUST treat this as a stream - // error (Section 5.4.2) of type PROTOCOL_ERROR or - // REFUSED_STREAM." - if sc.unackedSettings == 0 { - // They should know better. - return streamError(st.id, ErrCodeProtocol) - } - // Assume it's a network race, where they just haven't - // received our last SETTINGS update. But actually - // this can't happen yet, because we don't yet provide - // a way for users to adjust server parameters at - // runtime. - return streamError(st.id, ErrCodeRefusedStream) - } - - rw, req, err := sc.newWriterAndRequest(st, f) - if err != nil { - return err - } - st.reqTrailer = req.Trailer - if st.reqTrailer != nil { - st.trailer = make(http.Header) - } - st.body = req.Body.(*requestBody).pipe // may be nil - st.declBodyBytes = req.ContentLength - - handler := sc.handler.ServeHTTP - if f.Truncated { - // Their header list was too long. Send a 431 error. - handler = handleHeaderListTooLong - } else if err := checkValidHTTP2Request(req); err != nil { - handler = new400Handler(err) - } - - go sc.runHandler(rw, req, handler) - return nil -} - -func (st *stream) processTrailerHeaders(f *MetaHeadersFrame) error { - sc := st.sc - sc.serveG.check() - if st.gotTrailerHeader { - return ConnectionError(ErrCodeProtocol) - } - st.gotTrailerHeader = true - if !f.StreamEnded() { - return streamError(st.id, ErrCodeProtocol) - } - - if len(f.PseudoFields()) > 0 { - return streamError(st.id, ErrCodeProtocol) - } - if st.trailer != nil { - for _, hf := range f.RegularFields() { - key := sc.canonicalHeader(hf.Name) - if !ValidTrailerHeader(key) { - // TODO: send more details to the peer somehow. But http2 has - // no way to send debug data at a stream level. Discuss with - // HTTP folk. 
- return streamError(st.id, ErrCodeProtocol) - } - st.trailer[key] = append(st.trailer[key], hf.Value) - } - } - st.endStream() - return nil -} - -func (sc *serverConn) processPriority(f *PriorityFrame) error { - adjustStreamPriority(sc.streams, f.StreamID, f.PriorityParam) - return nil -} - -func adjustStreamPriority(streams map[uint32]*stream, streamID uint32, priority PriorityParam) { - st, ok := streams[streamID] - if !ok { - // TODO: not quite correct (this streamID might - // already exist in the dep tree, but be closed), but - // close enough for now. - return - } - st.weight = priority.Weight - parent := streams[priority.StreamDep] // might be nil - if parent == st { - // if client tries to set this stream to be the parent of itself - // ignore and keep going - return - } - - // section 5.3.3: If a stream is made dependent on one of its - // own dependencies, the formerly dependent stream is first - // moved to be dependent on the reprioritized stream's previous - // parent. The moved dependency retains its weight. - for piter := parent; piter != nil; piter = piter.parent { - if piter == st { - parent.parent = st.parent - break - } - } - st.parent = parent - if priority.Exclusive && (st.parent != nil || priority.StreamDep == 0) { - for _, openStream := range streams { - if openStream != st && openStream.parent == st.parent { - openStream.parent = st - } - } - } -} - -func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*responseWriter, *http.Request, error) { - sc.serveG.check() - - method := f.PseudoValue("method") - path := f.PseudoValue("path") - scheme := f.PseudoValue("scheme") - authority := f.PseudoValue("authority") - - isConnect := method == "CONNECT" - if isConnect { - if path != "" || scheme != "" || authority == "" { - return nil, nil, streamError(f.StreamID, ErrCodeProtocol) - } - } else if method == "" || path == "" || - (scheme != "https" && scheme != "http") { - // See 8.1.2.6 Malformed Requests and Responses: - // - // Malformed requests or responses that are detected - // MUST be treated as a stream error (Section 5.4.2) - // of type PROTOCOL_ERROR." - // - // 8.1.2.3 Request Pseudo-Header Fields - // "All HTTP/2 requests MUST include exactly one valid - // value for the :method, :scheme, and :path - // pseudo-header fields" - return nil, nil, streamError(f.StreamID, ErrCodeProtocol) - } - - bodyOpen := !f.StreamEnded() - if method == "HEAD" && bodyOpen { - // HEAD requests can't have bodies - return nil, nil, streamError(f.StreamID, ErrCodeProtocol) - } - var tlsState *tls.ConnectionState // nil if not scheme https - - if scheme == "https" { - tlsState = sc.tlsState - } - - header := make(http.Header) - for _, hf := range f.RegularFields() { - header.Add(sc.canonicalHeader(hf.Name), hf.Value) - } - - if authority == "" { - authority = header.Get("Host") - } - needsContinue := header.Get("Expect") == "100-continue" - if needsContinue { - header.Del("Expect") - } - // Merge Cookie headers into one "; "-delimited value. - if cookies := header["Cookie"]; len(cookies) > 1 { - header.Set("Cookie", strings.Join(cookies, "; ")) - } - - // Setup Trailers - var trailer http.Header - for _, v := range header["Trailer"] { - for _, key := range strings.Split(v, ",") { - key = http.CanonicalHeaderKey(strings.TrimSpace(key)) - switch key { - case "Transfer-Encoding", "Trailer", "Content-Length": - // Bogus. (copy of http1 rules) - // Ignore. 
- default: - if trailer == nil { - trailer = make(http.Header) - } - trailer[key] = nil - } - } - } - delete(header, "Trailer") - - body := &requestBody{ - conn: sc, - stream: st, - needsContinue: needsContinue, - } - var url_ *url.URL - var requestURI string - if isConnect { - url_ = &url.URL{Host: authority} - requestURI = authority // mimic HTTP/1 server behavior - } else { - var err error - url_, err = url.ParseRequestURI(path) - if err != nil { - return nil, nil, streamError(f.StreamID, ErrCodeProtocol) - } - requestURI = path - } - req := &http.Request{ - Method: method, - URL: url_, - RemoteAddr: sc.remoteAddrStr, - Header: header, - RequestURI: requestURI, - Proto: "HTTP/2.0", - ProtoMajor: 2, - ProtoMinor: 0, - TLS: tlsState, - Host: authority, - Body: body, - Trailer: trailer, - } - req = requestWithContext(req, st.ctx) - if bodyOpen { - // Disabled, per golang.org/issue/14960: - // st.reqBuf = sc.getRequestBodyBuf() - // TODO: remove this 64k of garbage per request (again, but without a data race): - buf := make([]byte, initialWindowSize) - - body.pipe = &pipe{ - b: &fixedBuffer{buf: buf}, - } - - if vv, ok := header["Content-Length"]; ok { - req.ContentLength, _ = strconv.ParseInt(vv[0], 10, 64) - } else { - req.ContentLength = -1 - } - } - - rws := responseWriterStatePool.Get().(*responseWriterState) - bwSave := rws.bw - *rws = responseWriterState{} // zero all the fields - rws.conn = sc - rws.bw = bwSave - rws.bw.Reset(chunkWriter{rws}) - rws.stream = st - rws.req = req - rws.body = body - - rw := &responseWriter{rws: rws} - return rw, req, nil -} - -func (sc *serverConn) getRequestBodyBuf() []byte { - sc.serveG.check() - if buf := sc.freeRequestBodyBuf; buf != nil { - sc.freeRequestBodyBuf = nil - return buf - } - return make([]byte, initialWindowSize) -} - -// Run on its own goroutine. -func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) { - didPanic := true - defer func() { - rw.rws.stream.cancelCtx() - if didPanic { - e := recover() - // Same as net/http: - const size = 64 << 10 - buf := make([]byte, size) - buf = buf[:runtime.Stack(buf, false)] - sc.writeFrameFromHandler(frameWriteMsg{ - write: handlerPanicRST{rw.rws.stream.id}, - stream: rw.rws.stream, - }) - sc.logf("http2: panic serving %v: %v\n%s", sc.conn.RemoteAddr(), e, buf) - return - } - rw.handlerDone() - }() - handler(rw, req) - didPanic = false -} - -func handleHeaderListTooLong(w http.ResponseWriter, r *http.Request) { - // 10.5.1 Limits on Header Block Size: - // .. "A server that receives a larger header block than it is - // willing to handle can send an HTTP 431 (Request Header Fields Too - // Large) status code" - const statusRequestHeaderFieldsTooLarge = 431 // only in Go 1.6+ - w.WriteHeader(statusRequestHeaderFieldsTooLarge) - io.WriteString(w, "

<h1>HTTP Error 431</h1><p>Request Header Field(s) Too Large</p>

") -} - -// called from handler goroutines. -// h may be nil. -func (sc *serverConn) writeHeaders(st *stream, headerData *writeResHeaders) error { - sc.serveG.checkNotOn() // NOT on - var errc chan error - if headerData.h != nil { - // If there's a header map (which we don't own), so we have to block on - // waiting for this frame to be written, so an http.Flush mid-handler - // writes out the correct value of keys, before a handler later potentially - // mutates it. - errc = errChanPool.Get().(chan error) - } - if err := sc.writeFrameFromHandler(frameWriteMsg{ - write: headerData, - stream: st, - done: errc, - }); err != nil { - return err - } - if errc != nil { - select { - case err := <-errc: - errChanPool.Put(errc) - return err - case <-sc.doneServing: - return errClientDisconnected - case <-st.cw: - return errStreamClosed - } - } - return nil -} - -// called from handler goroutines. -func (sc *serverConn) write100ContinueHeaders(st *stream) { - sc.writeFrameFromHandler(frameWriteMsg{ - write: write100ContinueHeadersFrame{st.id}, - stream: st, - }) -} - -// A bodyReadMsg tells the server loop that the http.Handler read n -// bytes of the DATA from the client on the given stream. -type bodyReadMsg struct { - st *stream - n int -} - -// called from handler goroutines. -// Notes that the handler for the given stream ID read n bytes of its body -// and schedules flow control tokens to be sent. -func (sc *serverConn) noteBodyReadFromHandler(st *stream, n int) { - sc.serveG.checkNotOn() // NOT on - select { - case sc.bodyReadCh <- bodyReadMsg{st, n}: - case <-sc.doneServing: - } -} - -func (sc *serverConn) noteBodyRead(st *stream, n int) { - sc.serveG.check() - sc.sendWindowUpdate(nil, n) // conn-level - if st.state != stateHalfClosedRemote && st.state != stateClosed { - // Don't send this WINDOW_UPDATE if the stream is closed - // remotely. - sc.sendWindowUpdate(st, n) - } -} - -// st may be nil for conn-level -func (sc *serverConn) sendWindowUpdate(st *stream, n int) { - sc.serveG.check() - // "The legal range for the increment to the flow control - // window is 1 to 2^31-1 (2,147,483,647) octets." - // A Go Read call on 64-bit machines could in theory read - // a larger Read than this. Very unlikely, but we handle it here - // rather than elsewhere for now. 
- const maxUint31 = 1<<31 - 1 - for n >= maxUint31 { - sc.sendWindowUpdate32(st, maxUint31) - n -= maxUint31 - } - sc.sendWindowUpdate32(st, int32(n)) -} - -// st may be nil for conn-level -func (sc *serverConn) sendWindowUpdate32(st *stream, n int32) { - sc.serveG.check() - if n == 0 { - return - } - if n < 0 { - panic("negative update") - } - var streamID uint32 - if st != nil { - streamID = st.id - } - sc.writeFrame(frameWriteMsg{ - write: writeWindowUpdate{streamID: streamID, n: uint32(n)}, - stream: st, - }) - var ok bool - if st == nil { - ok = sc.inflow.add(n) - } else { - ok = st.inflow.add(n) - } - if !ok { - panic("internal error; sent too many window updates without decrements?") - } -} - -type requestBody struct { - stream *stream - conn *serverConn - closed bool - pipe *pipe // non-nil if we have a HTTP entity message body - needsContinue bool // need to send a 100-continue -} - -func (b *requestBody) Close() error { - if b.pipe != nil { - b.pipe.BreakWithError(errClosedBody) - } - b.closed = true - return nil -} - -func (b *requestBody) Read(p []byte) (n int, err error) { - if b.needsContinue { - b.needsContinue = false - b.conn.write100ContinueHeaders(b.stream) - } - if b.pipe == nil { - return 0, io.EOF - } - n, err = b.pipe.Read(p) - if n > 0 { - b.conn.noteBodyReadFromHandler(b.stream, n) - } - return -} - -// responseWriter is the http.ResponseWriter implementation. It's -// intentionally small (1 pointer wide) to minimize garbage. The -// responseWriterState pointer inside is zeroed at the end of a -// request (in handlerDone) and calls on the responseWriter thereafter -// simply crash (caller's mistake), but the much larger responseWriterState -// and buffers are reused between multiple requests. -type responseWriter struct { - rws *responseWriterState -} - -// Optional http.ResponseWriter interfaces implemented. -var ( - _ http.CloseNotifier = (*responseWriter)(nil) - _ http.Flusher = (*responseWriter)(nil) - _ stringWriter = (*responseWriter)(nil) -) - -type responseWriterState struct { - // immutable within a request: - stream *stream - req *http.Request - body *requestBody // to close at end of request, if DATA frames didn't - conn *serverConn - - // TODO: adjust buffer writing sizes based on server config, frame size updates from peer, etc - bw *bufio.Writer // writing to a chunkWriter{this *responseWriterState} - - // mutated by http.Handler goroutine: - handlerHeader http.Header // nil until called - snapHeader http.Header // snapshot of handlerHeader at WriteHeader time - trailers []string // set in writeChunk - status int // status code passed to WriteHeader - wroteHeader bool // WriteHeader called (explicitly or implicitly). Not necessarily sent to user yet. - sentHeader bool // have we sent the header frame? - handlerDone bool // handler has finished - - sentContentLen int64 // non-zero if handler set a Content-Length header - wroteBytes int64 - - closeNotifierMu sync.Mutex // guards closeNotifierCh - closeNotifierCh chan bool // nil until first used -} - -type chunkWriter struct{ rws *responseWriterState } - -func (cw chunkWriter) Write(p []byte) (n int, err error) { return cw.rws.writeChunk(p) } - -func (rws *responseWriterState) hasTrailers() bool { return len(rws.trailers) != 0 } - -// declareTrailer is called for each Trailer header when the -// response header is written. It notes that a header will need to be -// written in the trailers at the end of the response. 
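Trailers reach declareTrailer below only through the handler's Header map. As an illustrative sketch (not part of this file; the header names X-Checksum and X-Late are hypothetical), a handler can either predeclare a trailer via the "Trailer" header or add one late with the exported TrailerPrefix described further down:

package example

import (
	"io"
	"net/http"

	"golang.org/x/net/http2"
)

func trailerHandler(w http.ResponseWriter, r *http.Request) {
	// Predeclared trailer: picked up by writeChunk via
	// foreachHeaderElement -> declareTrailer.
	w.Header().Set("Trailer", "X-Checksum") // hypothetical name

	io.WriteString(w, "hello\n")

	// Undeclared trailer: the "Trailer:" prefix is stripped by
	// promoteUndeclaredTrailers after the handler returns.
	w.Header().Set(http2.TrailerPrefix+"X-Late", "42") // hypothetical name

	// Value for the predeclared trailer, set after writing the body.
	w.Header().Set("X-Checksum", "abc123")
}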
-func (rws *responseWriterState) declareTrailer(k string) { - k = http.CanonicalHeaderKey(k) - if !ValidTrailerHeader(k) { - // Forbidden by RFC 2616 14.40. - rws.conn.logf("ignoring invalid trailer %q", k) - return - } - if !strSliceContains(rws.trailers, k) { - rws.trailers = append(rws.trailers, k) - } -} - -// writeChunk writes chunks from the bufio.Writer. But because -// bufio.Writer may bypass its chunking, sometimes p may be -// arbitrarily large. -// -// writeChunk is also responsible (on the first chunk) for sending the -// HEADER response. -func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { - if !rws.wroteHeader { - rws.writeHeader(200) - } - - isHeadResp := rws.req.Method == "HEAD" - if !rws.sentHeader { - rws.sentHeader = true - var ctype, clen string - if clen = rws.snapHeader.Get("Content-Length"); clen != "" { - rws.snapHeader.Del("Content-Length") - clen64, err := strconv.ParseInt(clen, 10, 64) - if err == nil && clen64 >= 0 { - rws.sentContentLen = clen64 - } else { - clen = "" - } - } - if clen == "" && rws.handlerDone && bodyAllowedForStatus(rws.status) && (len(p) > 0 || !isHeadResp) { - clen = strconv.Itoa(len(p)) - } - _, hasContentType := rws.snapHeader["Content-Type"] - if !hasContentType && bodyAllowedForStatus(rws.status) { - ctype = http.DetectContentType(p) - } - var date string - if _, ok := rws.snapHeader["Date"]; !ok { - // TODO(bradfitz): be faster here, like net/http? measure. - date = time.Now().UTC().Format(http.TimeFormat) - } - - for _, v := range rws.snapHeader["Trailer"] { - foreachHeaderElement(v, rws.declareTrailer) - } - - endStream := (rws.handlerDone && !rws.hasTrailers() && len(p) == 0) || isHeadResp - err = rws.conn.writeHeaders(rws.stream, &writeResHeaders{ - streamID: rws.stream.id, - httpResCode: rws.status, - h: rws.snapHeader, - endStream: endStream, - contentType: ctype, - contentLength: clen, - date: date, - }) - if err != nil { - return 0, err - } - if endStream { - return 0, nil - } - } - if isHeadResp { - return len(p), nil - } - if len(p) == 0 && !rws.handlerDone { - return 0, nil - } - - if rws.handlerDone { - rws.promoteUndeclaredTrailers() - } - - endStream := rws.handlerDone && !rws.hasTrailers() - if len(p) > 0 || endStream { - // only send a 0 byte DATA frame if we're ending the stream. - if err := rws.conn.writeDataFromHandler(rws.stream, p, endStream); err != nil { - return 0, err - } - } - - if rws.handlerDone && rws.hasTrailers() { - err = rws.conn.writeHeaders(rws.stream, &writeResHeaders{ - streamID: rws.stream.id, - h: rws.handlerHeader, - trailers: rws.trailers, - endStream: true, - }) - return len(p), err - } - return len(p), nil -} - -// TrailerPrefix is a magic prefix for ResponseWriter.Header map keys -// that, if present, signals that the map entry is actually for -// the response trailers, and not the response headers. The prefix -// is stripped after the ServeHTTP call finishes and the values are -// sent in the trailers. -// -// This mechanism is intended only for trailers that are not known -// prior to the headers being written. If the set of trailers is fixed -// or known before the header is written, the normal Go trailers mechanism -// is preferred: -// https://golang.org/pkg/net/http/#ResponseWriter -// https://golang.org/pkg/net/http/#example_ResponseWriter_trailers -const TrailerPrefix = "Trailer:" - -// promoteUndeclaredTrailers permits http.Handlers to set trailers -// after the header has already been flushed. 
Because the Go -// ResponseWriter interface has no way to set Trailers (only the -// Header), and because we didn't want to expand the ResponseWriter -// interface, and because nobody used trailers, and because RFC 2616 -// says you SHOULD (but not must) predeclare any trailers in the -// header, the official ResponseWriter rules said trailers in Go must -// be predeclared, and then we reuse the same ResponseWriter.Header() -// map to mean both Headers and Trailers. When it's time to write the -// Trailers, we pick out the fields of Headers that were declared as -// trailers. That worked for a while, until we found the first major -// user of Trailers in the wild: gRPC (using them only over http2), -// and gRPC libraries permit setting trailers mid-stream without -// predeclarnig them. So: change of plans. We still permit the old -// way, but we also permit this hack: if a Header() key begins with -// "Trailer:", the suffix of that key is a Trailer. Because ':' is an -// invalid token byte anyway, there is no ambiguity. (And it's already -// filtered out) It's mildly hacky, but not terrible. -// -// This method runs after the Handler is done and promotes any Header -// fields to be trailers. -func (rws *responseWriterState) promoteUndeclaredTrailers() { - for k, vv := range rws.handlerHeader { - if !strings.HasPrefix(k, TrailerPrefix) { - continue - } - trailerKey := strings.TrimPrefix(k, TrailerPrefix) - rws.declareTrailer(trailerKey) - rws.handlerHeader[http.CanonicalHeaderKey(trailerKey)] = vv - } - - if len(rws.trailers) > 1 { - sorter := sorterPool.Get().(*sorter) - sorter.SortStrings(rws.trailers) - sorterPool.Put(sorter) - } -} - -func (w *responseWriter) Flush() { - rws := w.rws - if rws == nil { - panic("Header called after Handler finished") - } - if rws.bw.Buffered() > 0 { - if err := rws.bw.Flush(); err != nil { - // Ignore the error. The frame writer already knows. - return - } - } else { - // The bufio.Writer won't call chunkWriter.Write - // (writeChunk with zero bytes, so we have to do it - // ourselves to force the HTTP response header and/or - // final DATA frame (with END_STREAM) to be sent. 
- rws.writeChunk(nil) - } -} - -func (w *responseWriter) CloseNotify() <-chan bool { - rws := w.rws - if rws == nil { - panic("CloseNotify called after Handler finished") - } - rws.closeNotifierMu.Lock() - ch := rws.closeNotifierCh - if ch == nil { - ch = make(chan bool, 1) - rws.closeNotifierCh = ch - go func() { - rws.stream.cw.Wait() // wait for close - ch <- true - }() - } - rws.closeNotifierMu.Unlock() - return ch -} - -func (w *responseWriter) Header() http.Header { - rws := w.rws - if rws == nil { - panic("Header called after Handler finished") - } - if rws.handlerHeader == nil { - rws.handlerHeader = make(http.Header) - } - return rws.handlerHeader -} - -func (w *responseWriter) WriteHeader(code int) { - rws := w.rws - if rws == nil { - panic("WriteHeader called after Handler finished") - } - rws.writeHeader(code) -} - -func (rws *responseWriterState) writeHeader(code int) { - if !rws.wroteHeader { - rws.wroteHeader = true - rws.status = code - if len(rws.handlerHeader) > 0 { - rws.snapHeader = cloneHeader(rws.handlerHeader) - } - } -} - -func cloneHeader(h http.Header) http.Header { - h2 := make(http.Header, len(h)) - for k, vv := range h { - vv2 := make([]string, len(vv)) - copy(vv2, vv) - h2[k] = vv2 - } - return h2 -} - -// The Life Of A Write is like this: -// -// * Handler calls w.Write or w.WriteString -> -// * -> rws.bw (*bufio.Writer) -> -// * (Handler migth call Flush) -// * -> chunkWriter{rws} -// * -> responseWriterState.writeChunk(p []byte) -// * -> responseWriterState.writeChunk (most of the magic; see comment there) -func (w *responseWriter) Write(p []byte) (n int, err error) { - return w.write(len(p), p, "") -} - -func (w *responseWriter) WriteString(s string) (n int, err error) { - return w.write(len(s), nil, s) -} - -// either dataB or dataS is non-zero. -func (w *responseWriter) write(lenData int, dataB []byte, dataS string) (n int, err error) { - rws := w.rws - if rws == nil { - panic("Write called after Handler finished") - } - if !rws.wroteHeader { - w.WriteHeader(200) - } - if !bodyAllowedForStatus(rws.status) { - return 0, http.ErrBodyNotAllowed - } - rws.wroteBytes += int64(len(dataB)) + int64(len(dataS)) // only one can be set - if rws.sentContentLen != 0 && rws.wroteBytes > rws.sentContentLen { - // TODO: send a RST_STREAM - return 0, errors.New("http2: handler wrote more than declared Content-Length") - } - - if dataB != nil { - return rws.bw.Write(dataB) - } else { - return rws.bw.WriteString(dataS) - } -} - -func (w *responseWriter) handlerDone() { - rws := w.rws - rws.handlerDone = true - w.Flush() - w.rws = nil - responseWriterStatePool.Put(rws) -} - -// foreachHeaderElement splits v according to the "#rule" construction -// in RFC 2616 section 2.1 and calls fn for each non-empty element. -func foreachHeaderElement(v string, fn func(string)) { - v = textproto.TrimString(v) - if v == "" { - return - } - if !strings.Contains(v, ",") { - fn(v) - return - } - for _, f := range strings.Split(v, ",") { - if f = textproto.TrimString(f); f != "" { - fn(f) - } - } -} - -// From http://httpwg.org/specs/rfc7540.html#rfc.section.8.1.2.2 -var connHeaders = []string{ - "Connection", - "Keep-Alive", - "Proxy-Connection", - "Transfer-Encoding", - "Upgrade", -} - -// checkValidHTTP2Request checks whether req is a valid HTTP/2 request, -// per RFC 7540 Section 8.1.2.2. -// The returned error is reported to users. 
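checkValidHTTP2Request below rejects requests that still carry the hop-by-hop fields listed in connHeaders. As a rough sketch of the other direction (a hypothetical helper, not part of this package), a proxy forwarding an inbound HTTP/1 request over HTTP/2 would typically drop those fields first:

package example

import "net/http"

// connSpecific mirrors the hop-by-hop fields in connHeaders; a request
// carrying any of them is rejected by checkValidHTTP2Request.
var connSpecific = []string{
	"Connection", "Keep-Alive", "Proxy-Connection",
	"Transfer-Encoding", "Upgrade",
}

// stripConnHeaders is a hypothetical helper for a forwarding proxy.
func stripConnHeaders(h http.Header) {
	for _, k := range connSpecific {
		h.Del(k)
	}
	// "TE" may only carry "trailers" in HTTP/2.
	if te := h.Get("Te"); te != "" && te != "trailers" {
		h.Set("Te", "trailers")
	}
}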
-func checkValidHTTP2Request(req *http.Request) error { - for _, h := range connHeaders { - if _, ok := req.Header[h]; ok { - return fmt.Errorf("request header %q is not valid in HTTP/2", h) - } - } - te := req.Header["Te"] - if len(te) > 0 && (len(te) > 1 || (te[0] != "trailers" && te[0] != "")) { - return errors.New(`request header "TE" may only be "trailers" in HTTP/2`) - } - return nil -} - -func new400Handler(err error) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - http.Error(w, err.Error(), http.StatusBadRequest) - } -} - -// ValidTrailerHeader reports whether name is a valid header field name to appear -// in trailers. -// See: http://tools.ietf.org/html/rfc7230#section-4.1.2 -func ValidTrailerHeader(name string) bool { - name = http.CanonicalHeaderKey(name) - if strings.HasPrefix(name, "If-") || badTrailer[name] { - return false - } - return true -} - -var badTrailer = map[string]bool{ - "Authorization": true, - "Cache-Control": true, - "Connection": true, - "Content-Encoding": true, - "Content-Length": true, - "Content-Range": true, - "Content-Type": true, - "Expect": true, - "Host": true, - "Keep-Alive": true, - "Max-Forwards": true, - "Pragma": true, - "Proxy-Authenticate": true, - "Proxy-Authorization": true, - "Proxy-Connection": true, - "Range": true, - "Realm": true, - "Te": true, - "Trailer": true, - "Transfer-Encoding": true, - "Www-Authenticate": true, -} diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go deleted file mode 100644 index bcd27ae64b..0000000000 --- a/vendor/golang.org/x/net/http2/transport.go +++ /dev/null @@ -1,1994 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Transport code. - -package http2 - -import ( - "bufio" - "bytes" - "compress/gzip" - "crypto/tls" - "errors" - "fmt" - "io" - "io/ioutil" - "log" - "math" - "net" - "net/http" - "sort" - "strconv" - "strings" - "sync" - "time" - - "golang.org/x/net/http2/hpack" - "golang.org/x/net/lex/httplex" -) - -const ( - // transportDefaultConnFlow is how many connection-level flow control - // tokens we give the server at start-up, past the default 64k. - transportDefaultConnFlow = 1 << 30 - - // transportDefaultStreamFlow is how many stream-level flow - // control tokens we announce to the peer, and how many bytes - // we buffer per stream. - transportDefaultStreamFlow = 4 << 20 - - // transportDefaultStreamMinRefresh is the minimum number of bytes we'll send - // a stream-level WINDOW_UPDATE for at a time. - transportDefaultStreamMinRefresh = 4 << 10 - - defaultUserAgent = "Go-http-client/2.0" -) - -// Transport is an HTTP/2 Transport. -// -// A Transport internally caches connections to servers. It is safe -// for concurrent use by multiple goroutines. -type Transport struct { - // DialTLS specifies an optional dial function for creating - // TLS connections for requests. - // - // If DialTLS is nil, tls.Dial is used. - // - // If the returned net.Conn has a ConnectionState method like tls.Conn, - // it will be used to set http.Response.TLS. - DialTLS func(network, addr string, cfg *tls.Config) (net.Conn, error) - - // TLSClientConfig specifies the TLS configuration to use with - // tls.Client. If nil, the default configuration is used. - TLSClientConfig *tls.Config - - // ConnPool optionally specifies an alternate connection pool to use. - // If nil, the default is used. 
- ConnPool ClientConnPool - - // DisableCompression, if true, prevents the Transport from - // requesting compression with an "Accept-Encoding: gzip" - // request header when the Request contains no existing - // Accept-Encoding value. If the Transport requests gzip on - // its own and gets a gzipped response, it's transparently - // decoded in the Response.Body. However, if the user - // explicitly requested gzip it is not automatically - // uncompressed. - DisableCompression bool - - // AllowHTTP, if true, permits HTTP/2 requests using the insecure, - // plain-text "http" scheme. Note that this does not enable h2c support. - AllowHTTP bool - - // MaxHeaderListSize is the http2 SETTINGS_MAX_HEADER_LIST_SIZE to - // send in the initial settings frame. It is how many bytes - // of response headers are allow. Unlike the http2 spec, zero here - // means to use a default limit (currently 10MB). If you actually - // want to advertise an ulimited value to the peer, Transport - // interprets the highest possible value here (0xffffffff or 1<<32-1) - // to mean no limit. - MaxHeaderListSize uint32 - - // t1, if non-nil, is the standard library Transport using - // this transport. Its settings are used (but not its - // RoundTrip method, etc). - t1 *http.Transport - - connPoolOnce sync.Once - connPoolOrDef ClientConnPool // non-nil version of ConnPool -} - -func (t *Transport) maxHeaderListSize() uint32 { - if t.MaxHeaderListSize == 0 { - return 10 << 20 - } - if t.MaxHeaderListSize == 0xffffffff { - return 0 - } - return t.MaxHeaderListSize -} - -func (t *Transport) disableCompression() bool { - return t.DisableCompression || (t.t1 != nil && t.t1.DisableCompression) -} - -var errTransportVersion = errors.New("http2: ConfigureTransport is only supported starting at Go 1.6") - -// ConfigureTransport configures a net/http HTTP/1 Transport to use HTTP/2. -// It requires Go 1.6 or later and returns an error if the net/http package is too old -// or if t1 has already been HTTP/2-enabled. -func ConfigureTransport(t1 *http.Transport) error { - _, err := configureTransport(t1) // in configure_transport.go (go1.6) or not_go16.go - return err -} - -func (t *Transport) connPool() ClientConnPool { - t.connPoolOnce.Do(t.initConnPool) - return t.connPoolOrDef -} - -func (t *Transport) initConnPool() { - if t.ConnPool != nil { - t.connPoolOrDef = t.ConnPool - } else { - t.connPoolOrDef = &clientConnPool{t: t} - } -} - -// ClientConn is the state of a single HTTP/2 client connection to an -// HTTP/2 server. 
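The exported configuration surface above is small; in practice a client is wired up either by upgrading an existing net/http Transport with ConfigureTransport or by using a bare http2.Transport directly. A minimal usage sketch (no request is actually sent here):

package main

import (
	"crypto/tls"
	"log"
	"net/http"

	"golang.org/x/net/http2"
)

func main() {
	// Option 1: upgrade an existing net/http Transport in place
	// (requires Go 1.6+, as noted above).
	t1 := &http.Transport{TLSClientConfig: &tls.Config{}}
	if err := http2.ConfigureTransport(t1); err != nil {
		log.Fatal(err)
	}
	c1 := &http.Client{Transport: t1}

	// Option 2: a standalone HTTP/2-only Transport.
	c2 := &http.Client{Transport: &http2.Transport{}}

	_ = c1
	_ = c2
}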
-type ClientConn struct { - t *Transport - tconn net.Conn // usually *tls.Conn, except specialized impls - tlsState *tls.ConnectionState // nil only for specialized impls - singleUse bool // whether being used for a single http.Request - - // readLoop goroutine fields: - readerDone chan struct{} // closed on error - readerErr error // set before readerDone is closed - - mu sync.Mutex // guards following - cond *sync.Cond // hold mu; broadcast on flow/closed changes - flow flow // our conn-level flow control quota (cs.flow is per stream) - inflow flow // peer's conn-level flow control - closed bool - wantSettingsAck bool // we sent a SETTINGS frame and haven't heard back - goAway *GoAwayFrame // if non-nil, the GoAwayFrame we received - goAwayDebug string // goAway frame's debug data, retained as a string - streams map[uint32]*clientStream // client-initiated - nextStreamID uint32 - bw *bufio.Writer - br *bufio.Reader - fr *Framer - lastActive time.Time - // Settings from peer: (also guarded by mu) - maxFrameSize uint32 - maxConcurrentStreams uint32 - initialWindowSize uint32 - - hbuf bytes.Buffer // HPACK encoder writes into this - henc *hpack.Encoder - freeBuf [][]byte - - wmu sync.Mutex // held while writing; acquire AFTER mu if holding both - werr error // first write error that has occurred -} - -// clientStream is the state for a single HTTP/2 stream. One of these -// is created for each Transport.RoundTrip call. -type clientStream struct { - cc *ClientConn - req *http.Request - trace *clientTrace // or nil - ID uint32 - resc chan resAndError - bufPipe pipe // buffered pipe with the flow-controlled response payload - requestedGzip bool - on100 func() // optional code to run if get a 100 continue response - - flow flow // guarded by cc.mu - inflow flow // guarded by cc.mu - bytesRemain int64 // -1 means unknown; owned by transportResponseBody.Read - readErr error // sticky read error; owned by transportResponseBody.Read - stopReqBody error // if non-nil, stop writing req body; guarded by cc.mu - - peerReset chan struct{} // closed on peer reset - resetErr error // populated before peerReset is closed - - done chan struct{} // closed when stream remove from cc.streams map; close calls guarded by cc.mu - - // owned by clientConnReadLoop: - firstByte bool // got the first response byte - pastHeaders bool // got first MetaHeadersFrame (actual headers) - pastTrailers bool // got optional second MetaHeadersFrame (trailers) - - trailer http.Header // accumulated trailers - resTrailer *http.Header // client's Response.Trailer -} - -// awaitRequestCancel runs in its own goroutine and waits for the user -// to cancel a RoundTrip request, its context to expire, or for the -// request to be done (any way it might be removed from the cc.streams -// map: peer reset, successful completion, TCP connection breakage, -// etc) -func (cs *clientStream) awaitRequestCancel(req *http.Request) { - ctx := reqContext(req) - if req.Cancel == nil && ctx.Done() == nil { - return - } - select { - case <-req.Cancel: - cs.bufPipe.CloseWithError(errRequestCanceled) - cs.cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) - case <-ctx.Done(): - cs.bufPipe.CloseWithError(ctx.Err()) - cs.cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) - case <-cs.done: - } -} - -// checkResetOrDone reports any error sent in a RST_STREAM frame by the -// server, or errStreamClosed if the stream is complete. 
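awaitRequestCancel above is what ties req.Cancel and the request context to a RST_STREAM with ErrCodeCancel on the wire. A small client-side sketch (Go 1.7+ contexts; https://example.com/ is a placeholder URL):

package main

import (
	"context"
	"fmt"
	"net/http"
	"time"

	"golang.org/x/net/http2"
)

func main() {
	client := &http.Client{Transport: &http2.Transport{}}

	// Either the deadline firing or cancel() being called aborts the
	// in-flight stream; RoundTrip then returns the context's error.
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	req, err := http.NewRequest("GET", "https://example.com/", nil) // placeholder URL
	if err != nil {
		fmt.Println(err)
		return
	}
	req = req.WithContext(ctx)

	if _, err := client.Do(req); err != nil {
		fmt.Println("request aborted or failed:", err) // e.g. context deadline exceeded
	}
}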
-func (cs *clientStream) checkResetOrDone() error { - select { - case <-cs.peerReset: - return cs.resetErr - case <-cs.done: - return errStreamClosed - default: - return nil - } -} - -func (cs *clientStream) abortRequestBodyWrite(err error) { - if err == nil { - panic("nil error") - } - cc := cs.cc - cc.mu.Lock() - cs.stopReqBody = err - cc.cond.Broadcast() - cc.mu.Unlock() -} - -type stickyErrWriter struct { - w io.Writer - err *error -} - -func (sew stickyErrWriter) Write(p []byte) (n int, err error) { - if *sew.err != nil { - return 0, *sew.err - } - n, err = sew.w.Write(p) - *sew.err = err - return -} - -var ErrNoCachedConn = errors.New("http2: no cached connection was available") - -// RoundTripOpt are options for the Transport.RoundTripOpt method. -type RoundTripOpt struct { - // OnlyCachedConn controls whether RoundTripOpt may - // create a new TCP connection. If set true and - // no cached connection is available, RoundTripOpt - // will return ErrNoCachedConn. - OnlyCachedConn bool -} - -func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { - return t.RoundTripOpt(req, RoundTripOpt{}) -} - -// authorityAddr returns a given authority (a host/IP, or host:port / ip:port) -// and returns a host:port. The port 443 is added if needed. -func authorityAddr(scheme string, authority string) (addr string) { - if _, _, err := net.SplitHostPort(authority); err == nil { - return authority - } - port := "443" - if scheme == "http" { - port = "80" - } - return net.JoinHostPort(authority, port) -} - -// RoundTripOpt is like RoundTrip, but takes options. -func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Response, error) { - if !(req.URL.Scheme == "https" || (req.URL.Scheme == "http" && t.AllowHTTP)) { - return nil, errors.New("http2: unsupported scheme") - } - - addr := authorityAddr(req.URL.Scheme, req.URL.Host) - for { - cc, err := t.connPool().GetClientConn(req, addr) - if err != nil { - t.vlogf("http2: Transport failed to get client conn for %s: %v", addr, err) - return nil, err - } - traceGotConn(req, cc) - res, err := cc.RoundTrip(req) - if shouldRetryRequest(req, err) { - continue - } - if err != nil { - t.vlogf("RoundTrip failure: %v", err) - return nil, err - } - return res, nil - } -} - -// CloseIdleConnections closes any connections which were previously -// connected from previous requests but are now sitting idle. -// It does not interrupt any connections currently in use. -func (t *Transport) CloseIdleConnections() { - if cp, ok := t.connPool().(clientConnPoolIdleCloser); ok { - cp.closeIdleConnections() - } -} - -var ( - errClientConnClosed = errors.New("http2: client conn is closed") - errClientConnUnusable = errors.New("http2: client conn not usable") -) - -func shouldRetryRequest(req *http.Request, err error) bool { - // TODO: retry GET requests (no bodies) more aggressively, if shutdown - // before response. 
- return err == errClientConnUnusable -} - -func (t *Transport) dialClientConn(addr string, singleUse bool) (*ClientConn, error) { - host, _, err := net.SplitHostPort(addr) - if err != nil { - return nil, err - } - tconn, err := t.dialTLS()("tcp", addr, t.newTLSConfig(host)) - if err != nil { - return nil, err - } - return t.newClientConn(tconn, singleUse) -} - -func (t *Transport) newTLSConfig(host string) *tls.Config { - cfg := new(tls.Config) - if t.TLSClientConfig != nil { - *cfg = *t.TLSClientConfig - } - if !strSliceContains(cfg.NextProtos, NextProtoTLS) { - cfg.NextProtos = append([]string{NextProtoTLS}, cfg.NextProtos...) - } - if cfg.ServerName == "" { - cfg.ServerName = host - } - return cfg -} - -func (t *Transport) dialTLS() func(string, string, *tls.Config) (net.Conn, error) { - if t.DialTLS != nil { - return t.DialTLS - } - return t.dialTLSDefault -} - -func (t *Transport) dialTLSDefault(network, addr string, cfg *tls.Config) (net.Conn, error) { - cn, err := tls.Dial(network, addr, cfg) - if err != nil { - return nil, err - } - if err := cn.Handshake(); err != nil { - return nil, err - } - if !cfg.InsecureSkipVerify { - if err := cn.VerifyHostname(cfg.ServerName); err != nil { - return nil, err - } - } - state := cn.ConnectionState() - if p := state.NegotiatedProtocol; p != NextProtoTLS { - return nil, fmt.Errorf("http2: unexpected ALPN protocol %q; want %q", p, NextProtoTLS) - } - if !state.NegotiatedProtocolIsMutual { - return nil, errors.New("http2: could not negotiate protocol mutually") - } - return cn, nil -} - -// disableKeepAlives reports whether connections should be closed as -// soon as possible after handling the first request. -func (t *Transport) disableKeepAlives() bool { - return t.t1 != nil && t.t1.DisableKeepAlives -} - -func (t *Transport) expectContinueTimeout() time.Duration { - if t.t1 == nil { - return 0 - } - return transportExpectContinueTimeout(t.t1) -} - -func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) { - return t.newClientConn(c, false) -} - -func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, error) { - cc := &ClientConn{ - t: t, - tconn: c, - readerDone: make(chan struct{}), - nextStreamID: 1, - maxFrameSize: 16 << 10, // spec default - initialWindowSize: 65535, // spec default - maxConcurrentStreams: 1000, // "infinite", per spec. 1000 seems good enough. - streams: make(map[uint32]*clientStream), - singleUse: singleUse, - wantSettingsAck: true, - } - if VerboseLogs { - t.vlogf("http2: Transport creating client conn %p to %v", cc, c.RemoteAddr()) - } - - cc.cond = sync.NewCond(&cc.mu) - cc.flow.add(int32(initialWindowSize)) - - // TODO: adjust this writer size to account for frame size + - // MTU + crypto/tls record padding. - cc.bw = bufio.NewWriter(stickyErrWriter{c, &cc.werr}) - cc.br = bufio.NewReader(c) - cc.fr = NewFramer(cc.bw, cc.br) - cc.fr.ReadMetaHeaders = hpack.NewDecoder(initialHeaderTableSize, nil) - cc.fr.MaxHeaderListSize = t.maxHeaderListSize() - - // TODO: SetMaxDynamicTableSize, SetMaxDynamicTableSizeLimit on - // henc in response to SETTINGS frames? 
- cc.henc = hpack.NewEncoder(&cc.hbuf) - - if cs, ok := c.(connectionStater); ok { - state := cs.ConnectionState() - cc.tlsState = &state - } - - initialSettings := []Setting{ - {ID: SettingEnablePush, Val: 0}, - {ID: SettingInitialWindowSize, Val: transportDefaultStreamFlow}, - } - if max := t.maxHeaderListSize(); max != 0 { - initialSettings = append(initialSettings, Setting{ID: SettingMaxHeaderListSize, Val: max}) - } - - cc.bw.Write(clientPreface) - cc.fr.WriteSettings(initialSettings...) - cc.fr.WriteWindowUpdate(0, transportDefaultConnFlow) - cc.inflow.add(transportDefaultConnFlow + initialWindowSize) - cc.bw.Flush() - if cc.werr != nil { - return nil, cc.werr - } - - go cc.readLoop() - return cc, nil -} - -func (cc *ClientConn) setGoAway(f *GoAwayFrame) { - cc.mu.Lock() - defer cc.mu.Unlock() - - old := cc.goAway - cc.goAway = f - - // Merge the previous and current GoAway error frames. - if cc.goAwayDebug == "" { - cc.goAwayDebug = string(f.DebugData()) - } - if old != nil && old.ErrCode != ErrCodeNo { - cc.goAway.ErrCode = old.ErrCode - } -} - -func (cc *ClientConn) CanTakeNewRequest() bool { - cc.mu.Lock() - defer cc.mu.Unlock() - return cc.canTakeNewRequestLocked() -} - -func (cc *ClientConn) canTakeNewRequestLocked() bool { - if cc.singleUse && cc.nextStreamID > 1 { - return false - } - return cc.goAway == nil && !cc.closed && - int64(len(cc.streams)+1) < int64(cc.maxConcurrentStreams) && - cc.nextStreamID < math.MaxInt32 -} - -func (cc *ClientConn) closeIfIdle() { - cc.mu.Lock() - if len(cc.streams) > 0 { - cc.mu.Unlock() - return - } - cc.closed = true - nextID := cc.nextStreamID - // TODO: do clients send GOAWAY too? maybe? Just Close: - cc.mu.Unlock() - - if VerboseLogs { - cc.vlogf("http2: Transport closing idle conn %p (forSingleUse=%v, maxStream=%v)", cc, cc.singleUse, nextID-2) - } - cc.tconn.Close() -} - -const maxAllocFrameSize = 512 << 10 - -// frameBuffer returns a scratch buffer suitable for writing DATA frames. -// They're capped at the min of the peer's max frame size or 512KB -// (kinda arbitrarily), but definitely capped so we don't allocate 4GB -// bufers. -func (cc *ClientConn) frameScratchBuffer() []byte { - cc.mu.Lock() - size := cc.maxFrameSize - if size > maxAllocFrameSize { - size = maxAllocFrameSize - } - for i, buf := range cc.freeBuf { - if len(buf) >= int(size) { - cc.freeBuf[i] = nil - cc.mu.Unlock() - return buf[:size] - } - } - cc.mu.Unlock() - return make([]byte, size) -} - -func (cc *ClientConn) putFrameScratchBuffer(buf []byte) { - cc.mu.Lock() - defer cc.mu.Unlock() - const maxBufs = 4 // arbitrary; 4 concurrent requests per conn? investigate. - if len(cc.freeBuf) < maxBufs { - cc.freeBuf = append(cc.freeBuf, buf) - return - } - for i, old := range cc.freeBuf { - if old == nil { - cc.freeBuf[i] = buf - return - } - } - // forget about it. -} - -// errRequestCanceled is a copy of net/http's errRequestCanceled because it's not -// exported. At least they'll be DeepEqual for h1-vs-h2 comparisons tests. -var errRequestCanceled = errors.New("net/http: request canceled") - -func commaSeparatedTrailers(req *http.Request) (string, error) { - keys := make([]string, 0, len(req.Trailer)) - for k := range req.Trailer { - k = http.CanonicalHeaderKey(k) - switch k { - case "Transfer-Encoding", "Trailer", "Content-Length": - return "", &badStringError{"invalid Trailer key", k} - } - keys = append(keys, k) - } - if len(keys) > 0 { - sort.Strings(keys) - // TODO: could do better allocation-wise here, but trailers are rare, - // so being lazy for now. 
- return strings.Join(keys, ","), nil - } - return "", nil -} - -func (cc *ClientConn) responseHeaderTimeout() time.Duration { - if cc.t.t1 != nil { - return cc.t.t1.ResponseHeaderTimeout - } - // No way to do this (yet?) with just an http2.Transport. Probably - // no need. Request.Cancel this is the new way. We only need to support - // this for compatibility with the old http.Transport fields when - // we're doing transparent http2. - return 0 -} - -// checkConnHeaders checks whether req has any invalid connection-level headers. -// per RFC 7540 section 8.1.2.2: Connection-Specific Header Fields. -// Certain headers are special-cased as okay but not transmitted later. -func checkConnHeaders(req *http.Request) error { - if v := req.Header.Get("Upgrade"); v != "" { - return errors.New("http2: invalid Upgrade request header") - } - if v := req.Header.Get("Transfer-Encoding"); (v != "" && v != "chunked") || len(req.Header["Transfer-Encoding"]) > 1 { - return errors.New("http2: invalid Transfer-Encoding request header") - } - if v := req.Header.Get("Connection"); (v != "" && v != "close" && v != "keep-alive") || len(req.Header["Connection"]) > 1 { - return errors.New("http2: invalid Connection request header") - } - return nil -} - -func bodyAndLength(req *http.Request) (body io.Reader, contentLen int64) { - body = req.Body - if body == nil { - return nil, 0 - } - if req.ContentLength != 0 { - return req.Body, req.ContentLength - } - - // We have a body but a zero content length. Test to see if - // it's actually zero or just unset. - var buf [1]byte - n, rerr := body.Read(buf[:]) - if rerr != nil && rerr != io.EOF { - return errorReader{rerr}, -1 - } - if n == 1 { - // Oh, guess there is data in this Body Reader after all. - // The ContentLength field just wasn't set. - // Stitch the Body back together again, re-attaching our - // consumed byte. - if rerr == io.EOF { - return bytes.NewReader(buf[:]), 1 - } - return io.MultiReader(bytes.NewReader(buf[:]), body), -1 - } - // Body is actually zero bytes. - return nil, 0 -} - -func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { - if err := checkConnHeaders(req); err != nil { - return nil, err - } - - trailers, err := commaSeparatedTrailers(req) - if err != nil { - return nil, err - } - hasTrailers := trailers != "" - - body, contentLen := bodyAndLength(req) - hasBody := body != nil - - cc.mu.Lock() - cc.lastActive = time.Now() - if cc.closed || !cc.canTakeNewRequestLocked() { - cc.mu.Unlock() - return nil, errClientConnUnusable - } - - // TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere? - var requestedGzip bool - if !cc.t.disableCompression() && - req.Header.Get("Accept-Encoding") == "" && - req.Header.Get("Range") == "" && - req.Method != "HEAD" { - // Request gzip only, not deflate. Deflate is ambiguous and - // not as universally supported anyway. - // See: http://www.gzip.org/zlib/zlib_faq.html#faq38 - // - // Note that we don't request this for HEAD requests, - // due to a bug in nginx: - // http://trac.nginx.org/nginx/ticket/358 - // https://golang.org/issue/5522 - // - // We don't request gzip if the request is for a range, since - // auto-decoding a portion of a gzipped document will just fail - // anyway. 
See https://golang.org/issue/8923 - requestedGzip = true - } - - // we send: HEADERS{1}, CONTINUATION{0,} + DATA{0,} (DATA is - // sent by writeRequestBody below, along with any Trailers, - // again in form HEADERS{1}, CONTINUATION{0,}) - hdrs, err := cc.encodeHeaders(req, requestedGzip, trailers, contentLen) - if err != nil { - cc.mu.Unlock() - return nil, err - } - - cs := cc.newStream() - cs.req = req - cs.trace = requestTrace(req) - cs.requestedGzip = requestedGzip - bodyWriter := cc.t.getBodyWriterState(cs, body) - cs.on100 = bodyWriter.on100 - - cc.wmu.Lock() - endStream := !hasBody && !hasTrailers - werr := cc.writeHeaders(cs.ID, endStream, hdrs) - cc.wmu.Unlock() - traceWroteHeaders(cs.trace) - cc.mu.Unlock() - - if werr != nil { - if hasBody { - req.Body.Close() // per RoundTripper contract - bodyWriter.cancel() - } - cc.forgetStreamID(cs.ID) - // Don't bother sending a RST_STREAM (our write already failed; - // no need to keep writing) - traceWroteRequest(cs.trace, werr) - return nil, werr - } - - var respHeaderTimer <-chan time.Time - if hasBody { - bodyWriter.scheduleBodyWrite() - } else { - traceWroteRequest(cs.trace, nil) - if d := cc.responseHeaderTimeout(); d != 0 { - timer := time.NewTimer(d) - defer timer.Stop() - respHeaderTimer = timer.C - } - } - - readLoopResCh := cs.resc - bodyWritten := false - ctx := reqContext(req) - - handleReadLoopResponse := func(re resAndError) (*http.Response, error) { - res := re.res - if re.err != nil || res.StatusCode > 299 { - // On error or status code 3xx, 4xx, 5xx, etc abort any - // ongoing write, assuming that the server doesn't care - // about our request body. If the server replied with 1xx or - // 2xx, however, then assume the server DOES potentially - // want our body (e.g. full-duplex streaming: - // golang.org/issue/13444). If it turns out the server - // doesn't, they'll RST_STREAM us soon enough. This is a - // heuristic to avoid adding knobs to Transport. Hopefully - // we can keep it. - bodyWriter.cancel() - cs.abortRequestBodyWrite(errStopReqBodyWrite) - } - if re.err != nil { - cc.forgetStreamID(cs.ID) - return nil, re.err - } - res.Request = req - res.TLS = cc.tlsState - return res, nil - } - - for { - select { - case re := <-readLoopResCh: - return handleReadLoopResponse(re) - case <-respHeaderTimer: - cc.forgetStreamID(cs.ID) - if !hasBody || bodyWritten { - cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) - } else { - bodyWriter.cancel() - cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel) - } - return nil, errTimeout - case <-ctx.Done(): - cc.forgetStreamID(cs.ID) - if !hasBody || bodyWritten { - cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) - } else { - bodyWriter.cancel() - cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel) - } - return nil, ctx.Err() - case <-req.Cancel: - cc.forgetStreamID(cs.ID) - if !hasBody || bodyWritten { - cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) - } else { - bodyWriter.cancel() - cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel) - } - return nil, errRequestCanceled - case <-cs.peerReset: - // processResetStream already removed the - // stream from the streams map; no need for - // forgetStreamID. - return nil, cs.resetErr - case err := <-bodyWriter.resc: - // Prefer the read loop's response, if available. Issue 16102. 
- select { - case re := <-readLoopResCh: - return handleReadLoopResponse(re) - default: - } - if err != nil { - return nil, err - } - bodyWritten = true - if d := cc.responseHeaderTimeout(); d != 0 { - timer := time.NewTimer(d) - defer timer.Stop() - respHeaderTimer = timer.C - } - } - } -} - -// requires cc.wmu be held -func (cc *ClientConn) writeHeaders(streamID uint32, endStream bool, hdrs []byte) error { - first := true // first frame written (HEADERS is first, then CONTINUATION) - frameSize := int(cc.maxFrameSize) - for len(hdrs) > 0 && cc.werr == nil { - chunk := hdrs - if len(chunk) > frameSize { - chunk = chunk[:frameSize] - } - hdrs = hdrs[len(chunk):] - endHeaders := len(hdrs) == 0 - if first { - cc.fr.WriteHeaders(HeadersFrameParam{ - StreamID: streamID, - BlockFragment: chunk, - EndStream: endStream, - EndHeaders: endHeaders, - }) - first = false - } else { - cc.fr.WriteContinuation(streamID, endHeaders, chunk) - } - } - // TODO(bradfitz): this Flush could potentially block (as - // could the WriteHeaders call(s) above), which means they - // wouldn't respond to Request.Cancel being readable. That's - // rare, but this should probably be in a goroutine. - cc.bw.Flush() - return cc.werr -} - -// internal error values; they don't escape to callers -var ( - // abort request body write; don't send cancel - errStopReqBodyWrite = errors.New("http2: aborting request body write") - - // abort request body write, but send stream reset of cancel. - errStopReqBodyWriteAndCancel = errors.New("http2: canceling request") -) - -func (cs *clientStream) writeRequestBody(body io.Reader, bodyCloser io.Closer) (err error) { - cc := cs.cc - sentEnd := false // whether we sent the final DATA frame w/ END_STREAM - buf := cc.frameScratchBuffer() - defer cc.putFrameScratchBuffer(buf) - - defer func() { - traceWroteRequest(cs.trace, err) - // TODO: write h12Compare test showing whether - // Request.Body is closed by the Transport, - // and in multiple cases: server replies <=299 and >299 - // while still writing request body - cerr := bodyCloser.Close() - if err == nil { - err = cerr - } - }() - - req := cs.req - hasTrailers := req.Trailer != nil - - var sawEOF bool - for !sawEOF { - n, err := body.Read(buf) - if err == io.EOF { - sawEOF = true - err = nil - } else if err != nil { - return err - } - - remain := buf[:n] - for len(remain) > 0 && err == nil { - var allowed int32 - allowed, err = cs.awaitFlowControl(len(remain)) - switch { - case err == errStopReqBodyWrite: - return err - case err == errStopReqBodyWriteAndCancel: - cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) - return err - case err != nil: - return err - } - cc.wmu.Lock() - data := remain[:allowed] - remain = remain[allowed:] - sentEnd = sawEOF && len(remain) == 0 && !hasTrailers - err = cc.fr.WriteData(cs.ID, sentEnd, data) - if err == nil { - // TODO(bradfitz): this flush is for latency, not bandwidth. - // Most requests won't need this. Make this opt-in or - // opt-out? Use some heuristic on the body type? Nagel-like - // timers? Based on 'n'? Only last chunk of this for loop, - // unless flow control tokens are low? For now, always. - // If we change this, see comment below. - err = cc.bw.Flush() - } - cc.wmu.Unlock() - } - if err != nil { - return err - } - } - - if sentEnd { - // Already sent END_STREAM (which implies we have no - // trailers) and flushed, because currently all - // WriteData frames above get a flush. So we're done. 
- return nil - } - - var trls []byte - if hasTrailers { - cc.mu.Lock() - defer cc.mu.Unlock() - trls = cc.encodeTrailers(req) - } - - cc.wmu.Lock() - defer cc.wmu.Unlock() - - // Two ways to send END_STREAM: either with trailers, or - // with an empty DATA frame. - if len(trls) > 0 { - err = cc.writeHeaders(cs.ID, true, trls) - } else { - err = cc.fr.WriteData(cs.ID, true, nil) - } - if ferr := cc.bw.Flush(); ferr != nil && err == nil { - err = ferr - } - return err -} - -// awaitFlowControl waits for [1, min(maxBytes, cc.cs.maxFrameSize)] flow -// control tokens from the server. -// It returns either the non-zero number of tokens taken or an error -// if the stream is dead. -func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error) { - cc := cs.cc - cc.mu.Lock() - defer cc.mu.Unlock() - for { - if cc.closed { - return 0, errClientConnClosed - } - if cs.stopReqBody != nil { - return 0, cs.stopReqBody - } - if err := cs.checkResetOrDone(); err != nil { - return 0, err - } - if a := cs.flow.available(); a > 0 { - take := a - if int(take) > maxBytes { - - take = int32(maxBytes) // can't truncate int; take is int32 - } - if take > int32(cc.maxFrameSize) { - take = int32(cc.maxFrameSize) - } - cs.flow.take(take) - return take, nil - } - cc.cond.Wait() - } -} - -type badStringError struct { - what string - str string -} - -func (e *badStringError) Error() string { return fmt.Sprintf("%s %q", e.what, e.str) } - -// requires cc.mu be held. -func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trailers string, contentLength int64) ([]byte, error) { - cc.hbuf.Reset() - - host := req.Host - if host == "" { - host = req.URL.Host - } - - var path string - if req.Method != "CONNECT" { - path = req.URL.RequestURI() - if !validPseudoPath(path) { - orig := path - path = strings.TrimPrefix(path, req.URL.Scheme+"://"+host) - if !validPseudoPath(path) { - if req.URL.Opaque != "" { - return nil, fmt.Errorf("invalid request :path %q from URL.Opaque = %q", orig, req.URL.Opaque) - } else { - return nil, fmt.Errorf("invalid request :path %q", orig) - } - } - } - } - - // Check for any invalid headers and return an error before we - // potentially pollute our hpack state. (We want to be able to - // continue to reuse the hpack encoder for future requests) - for k, vv := range req.Header { - if !httplex.ValidHeaderFieldName(k) { - return nil, fmt.Errorf("invalid HTTP header name %q", k) - } - for _, v := range vv { - if !httplex.ValidHeaderFieldValue(v) { - return nil, fmt.Errorf("invalid HTTP header value %q for header %q", v, k) - } - } - } - - // 8.1.2.3 Request Pseudo-Header Fields - // The :path pseudo-header field includes the path and query parts of the - // target URI (the path-absolute production and optionally a '?' character - // followed by the query production (see Sections 3.3 and 3.4 of - // [RFC3986]). - cc.writeHeader(":authority", host) - cc.writeHeader(":method", req.Method) - if req.Method != "CONNECT" { - cc.writeHeader(":path", path) - cc.writeHeader(":scheme", "https") - } - if trailers != "" { - cc.writeHeader("trailer", trailers) - } - - var didUA bool - for k, vv := range req.Header { - lowKey := strings.ToLower(k) - switch lowKey { - case "host", "content-length": - // Host is :authority, already sent. - // Content-Length is automatic, set below. - continue - case "connection", "proxy-connection", "transfer-encoding", "upgrade", "keep-alive": - // Per 8.1.2.2 Connection-Specific Header - // Fields, don't send connection-specific - // fields. 
We have already checked if any - // are error-worthy so just ignore the rest. - continue - case "user-agent": - // Match Go's http1 behavior: at most one - // User-Agent. If set to nil or empty string, - // then omit it. Otherwise if not mentioned, - // include the default (below). - didUA = true - if len(vv) < 1 { - continue - } - vv = vv[:1] - if vv[0] == "" { - continue - } - } - for _, v := range vv { - cc.writeHeader(lowKey, v) - } - } - if shouldSendReqContentLength(req.Method, contentLength) { - cc.writeHeader("content-length", strconv.FormatInt(contentLength, 10)) - } - if addGzipHeader { - cc.writeHeader("accept-encoding", "gzip") - } - if !didUA { - cc.writeHeader("user-agent", defaultUserAgent) - } - return cc.hbuf.Bytes(), nil -} - -// shouldSendReqContentLength reports whether the http2.Transport should send -// a "content-length" request header. This logic is basically a copy of the net/http -// transferWriter.shouldSendContentLength. -// The contentLength is the corrected contentLength (so 0 means actually 0, not unknown). -// -1 means unknown. -func shouldSendReqContentLength(method string, contentLength int64) bool { - if contentLength > 0 { - return true - } - if contentLength < 0 { - return false - } - // For zero bodies, whether we send a content-length depends on the method. - // It also kinda doesn't matter for http2 either way, with END_STREAM. - switch method { - case "POST", "PUT", "PATCH": - return true - default: - return false - } -} - -// requires cc.mu be held. -func (cc *ClientConn) encodeTrailers(req *http.Request) []byte { - cc.hbuf.Reset() - for k, vv := range req.Trailer { - // Transfer-Encoding, etc.. have already been filter at the - // start of RoundTrip - lowKey := strings.ToLower(k) - for _, v := range vv { - cc.writeHeader(lowKey, v) - } - } - return cc.hbuf.Bytes() -} - -func (cc *ClientConn) writeHeader(name, value string) { - if VerboseLogs { - log.Printf("http2: Transport encoding header %q = %q", name, value) - } - cc.henc.WriteField(hpack.HeaderField{Name: name, Value: value}) -} - -type resAndError struct { - res *http.Response - err error -} - -// requires cc.mu be held. -func (cc *ClientConn) newStream() *clientStream { - cs := &clientStream{ - cc: cc, - ID: cc.nextStreamID, - resc: make(chan resAndError, 1), - peerReset: make(chan struct{}), - done: make(chan struct{}), - } - cs.flow.add(int32(cc.initialWindowSize)) - cs.flow.setConnFlow(&cc.flow) - cs.inflow.add(transportDefaultStreamFlow) - cs.inflow.setConnFlow(&cc.inflow) - cc.nextStreamID += 2 - cc.streams[cs.ID] = cs - return cs -} - -func (cc *ClientConn) forgetStreamID(id uint32) { - cc.streamByID(id, true) -} - -func (cc *ClientConn) streamByID(id uint32, andRemove bool) *clientStream { - cc.mu.Lock() - defer cc.mu.Unlock() - cs := cc.streams[id] - if andRemove && cs != nil && !cc.closed { - cc.lastActive = time.Now() - delete(cc.streams, id) - close(cs.done) - cc.cond.Broadcast() // wake up checkResetOrDone via clientStream.awaitFlowControl - } - return cs -} - -// clientConnReadLoop is the state owned by the clientConn's frame-reading readLoop. -type clientConnReadLoop struct { - cc *ClientConn - activeRes map[uint32]*clientStream // keyed by streamID - closeWhenIdle bool -} - -// readLoop runs in its own goroutine and reads and dispatches frames. 
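encodeHeaders above writes the pseudo-header fields first and then the lower-cased regular fields into the shared HPACK encoder. The same shape can be produced by hand with the hpack package; a minimal sketch (example.com and the field values are placeholders):

package main

import (
	"bytes"
	"fmt"

	"golang.org/x/net/http2/hpack"
)

func main() {
	var buf bytes.Buffer
	enc := hpack.NewEncoder(&buf)

	// Pseudo-headers first, then regular fields in lowercase,
	// mirroring the order used by encodeHeaders.
	fields := []hpack.HeaderField{
		{Name: ":authority", Value: "example.com"}, // placeholder host
		{Name: ":method", Value: "GET"},
		{Name: ":path", Value: "/"},
		{Name: ":scheme", Value: "https"},
		{Name: "accept-encoding", Value: "gzip"},
		{Name: "user-agent", Value: "Go-http-client/2.0"},
	}
	for _, f := range fields {
		if err := enc.WriteField(f); err != nil {
			fmt.Println("encode error:", err)
			return
		}
	}
	fmt.Printf("HPACK block: %d bytes\n", buf.Len())
}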
-func (cc *ClientConn) readLoop() { - rl := &clientConnReadLoop{ - cc: cc, - activeRes: make(map[uint32]*clientStream), - } - - defer rl.cleanup() - cc.readerErr = rl.run() - if ce, ok := cc.readerErr.(ConnectionError); ok { - cc.wmu.Lock() - cc.fr.WriteGoAway(0, ErrCode(ce), nil) - cc.wmu.Unlock() - } -} - -// GoAwayError is returned by the Transport when the server closes the -// TCP connection after sending a GOAWAY frame. -type GoAwayError struct { - LastStreamID uint32 - ErrCode ErrCode - DebugData string -} - -func (e GoAwayError) Error() string { - return fmt.Sprintf("http2: server sent GOAWAY and closed the connection; LastStreamID=%v, ErrCode=%v, debug=%q", - e.LastStreamID, e.ErrCode, e.DebugData) -} - -func isEOFOrNetReadError(err error) bool { - if err == io.EOF { - return true - } - ne, ok := err.(*net.OpError) - return ok && ne.Op == "read" -} - -func (rl *clientConnReadLoop) cleanup() { - cc := rl.cc - defer cc.tconn.Close() - defer cc.t.connPool().MarkDead(cc) - defer close(cc.readerDone) - - // Close any response bodies if the server closes prematurely. - // TODO: also do this if we've written the headers but not - // gotten a response yet. - err := cc.readerErr - cc.mu.Lock() - if cc.goAway != nil && isEOFOrNetReadError(err) { - err = GoAwayError{ - LastStreamID: cc.goAway.LastStreamID, - ErrCode: cc.goAway.ErrCode, - DebugData: cc.goAwayDebug, - } - } else if err == io.EOF { - err = io.ErrUnexpectedEOF - } - for _, cs := range rl.activeRes { - cs.bufPipe.CloseWithError(err) - } - for _, cs := range cc.streams { - select { - case cs.resc <- resAndError{err: err}: - default: - } - close(cs.done) - } - cc.closed = true - cc.cond.Broadcast() - cc.mu.Unlock() -} - -func (rl *clientConnReadLoop) run() error { - cc := rl.cc - rl.closeWhenIdle = cc.t.disableKeepAlives() || cc.singleUse - gotReply := false // ever saw a HEADERS reply - gotSettings := false - for { - f, err := cc.fr.ReadFrame() - if err != nil { - cc.vlogf("http2: Transport readFrame error on conn %p: (%T) %v", cc, err, err) - } - if se, ok := err.(StreamError); ok { - if cs := cc.streamByID(se.StreamID, true /*ended; remove it*/); cs != nil { - cs.cc.writeStreamReset(cs.ID, se.Code, err) - if se.Cause == nil { - se.Cause = cc.fr.errDetail - } - rl.endStreamError(cs, se) - } - continue - } else if err != nil { - return err - } - if VerboseLogs { - cc.vlogf("http2: Transport received %s", summarizeFrame(f)) - } - if !gotSettings { - if _, ok := f.(*SettingsFrame); !ok { - cc.logf("protocol error: received %T before a SETTINGS frame", f) - return ConnectionError(ErrCodeProtocol) - } - gotSettings = true - } - maybeIdle := false // whether frame might transition us to idle - - switch f := f.(type) { - case *MetaHeadersFrame: - err = rl.processHeaders(f) - maybeIdle = true - gotReply = true - case *DataFrame: - err = rl.processData(f) - maybeIdle = true - case *GoAwayFrame: - err = rl.processGoAway(f) - maybeIdle = true - case *RSTStreamFrame: - err = rl.processResetStream(f) - maybeIdle = true - case *SettingsFrame: - err = rl.processSettings(f) - case *PushPromiseFrame: - err = rl.processPushPromise(f) - case *WindowUpdateFrame: - err = rl.processWindowUpdate(f) - case *PingFrame: - err = rl.processPing(f) - default: - cc.logf("Transport: unhandled response frame type %T", f) - } - if err != nil { - if VerboseLogs { - cc.vlogf("http2: Transport conn %p received error from processing frame %v: %v", cc, summarizeFrame(f), err) - } - return err - } - if rl.closeWhenIdle && gotReply && maybeIdle && len(rl.activeRes) == 
0 { - cc.closeIfIdle() - } - } -} - -func (rl *clientConnReadLoop) processHeaders(f *MetaHeadersFrame) error { - cc := rl.cc - cs := cc.streamByID(f.StreamID, f.StreamEnded()) - if cs == nil { - // We'd get here if we canceled a request while the - // server had its response still in flight. So if this - // was just something we canceled, ignore it. - return nil - } - if !cs.firstByte { - if cs.trace != nil { - // TODO(bradfitz): move first response byte earlier, - // when we first read the 9 byte header, not waiting - // until all the HEADERS+CONTINUATION frames have been - // merged. This works for now. - traceFirstResponseByte(cs.trace) - } - cs.firstByte = true - } - if !cs.pastHeaders { - cs.pastHeaders = true - } else { - return rl.processTrailers(cs, f) - } - - res, err := rl.handleResponse(cs, f) - if err != nil { - if _, ok := err.(ConnectionError); ok { - return err - } - // Any other error type is a stream error. - cs.cc.writeStreamReset(f.StreamID, ErrCodeProtocol, err) - cs.resc <- resAndError{err: err} - return nil // return nil from process* funcs to keep conn alive - } - if res == nil { - // (nil, nil) special case. See handleResponse docs. - return nil - } - if res.Body != noBody { - rl.activeRes[cs.ID] = cs - } - cs.resTrailer = &res.Trailer - cs.resc <- resAndError{res: res} - return nil -} - -// may return error types nil, or ConnectionError. Any other error value -// is a StreamError of type ErrCodeProtocol. The returned error in that case -// is the detail. -// -// As a special case, handleResponse may return (nil, nil) to skip the -// frame (currently only used for 100 expect continue). This special -// case is going away after Issue 13851 is fixed. -func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFrame) (*http.Response, error) { - if f.Truncated { - return nil, errResponseHeaderListSize - } - - status := f.PseudoValue("status") - if status == "" { - return nil, errors.New("missing status pseudo header") - } - statusCode, err := strconv.Atoi(status) - if err != nil { - return nil, errors.New("malformed non-numeric status pseudo header") - } - - if statusCode == 100 { - traceGot100Continue(cs.trace) - if cs.on100 != nil { - cs.on100() // forces any write delay timer to fire - } - cs.pastHeaders = false // do it all again - return nil, nil - } - - header := make(http.Header) - res := &http.Response{ - Proto: "HTTP/2.0", - ProtoMajor: 2, - Header: header, - StatusCode: statusCode, - Status: status + " " + http.StatusText(statusCode), - } - for _, hf := range f.RegularFields() { - key := http.CanonicalHeaderKey(hf.Name) - if key == "Trailer" { - t := res.Trailer - if t == nil { - t = make(http.Header) - res.Trailer = t - } - foreachHeaderElement(hf.Value, func(v string) { - t[http.CanonicalHeaderKey(v)] = nil - }) - } else { - header[key] = append(header[key], hf.Value) - } - } - - streamEnded := f.StreamEnded() - isHead := cs.req.Method == "HEAD" - if !streamEnded || isHead { - res.ContentLength = -1 - if clens := res.Header["Content-Length"]; len(clens) == 1 { - if clen64, err := strconv.ParseInt(clens[0], 10, 64); err == nil { - res.ContentLength = clen64 - } else { - // TODO: care? unlike http/1, it won't mess up our framing, so it's - // more safe smuggling-wise to ignore. - } - } else if len(clens) > 1 { - // TODO: care? unlike http/1, it won't mess up our framing, so it's - // more safe smuggling-wise to ignore. 
- } - } - - if streamEnded || isHead { - res.Body = noBody - return res, nil - } - - buf := new(bytes.Buffer) // TODO(bradfitz): recycle this garbage - cs.bufPipe = pipe{b: buf} - cs.bytesRemain = res.ContentLength - res.Body = transportResponseBody{cs} - go cs.awaitRequestCancel(cs.req) - - if cs.requestedGzip && res.Header.Get("Content-Encoding") == "gzip" { - res.Header.Del("Content-Encoding") - res.Header.Del("Content-Length") - res.ContentLength = -1 - res.Body = &gzipReader{body: res.Body} - setResponseUncompressed(res) - } - return res, nil -} - -func (rl *clientConnReadLoop) processTrailers(cs *clientStream, f *MetaHeadersFrame) error { - if cs.pastTrailers { - // Too many HEADERS frames for this stream. - return ConnectionError(ErrCodeProtocol) - } - cs.pastTrailers = true - if !f.StreamEnded() { - // We expect that any headers for trailers also - // has END_STREAM. - return ConnectionError(ErrCodeProtocol) - } - if len(f.PseudoFields()) > 0 { - // No pseudo header fields are defined for trailers. - // TODO: ConnectionError might be overly harsh? Check. - return ConnectionError(ErrCodeProtocol) - } - - trailer := make(http.Header) - for _, hf := range f.RegularFields() { - key := http.CanonicalHeaderKey(hf.Name) - trailer[key] = append(trailer[key], hf.Value) - } - cs.trailer = trailer - - rl.endStream(cs) - return nil -} - -// transportResponseBody is the concrete type of Transport.RoundTrip's -// Response.Body. It is an io.ReadCloser. On Read, it reads from cs.body. -// On Close it sends RST_STREAM if EOF wasn't already seen. -type transportResponseBody struct { - cs *clientStream -} - -func (b transportResponseBody) Read(p []byte) (n int, err error) { - cs := b.cs - cc := cs.cc - - if cs.readErr != nil { - return 0, cs.readErr - } - n, err = b.cs.bufPipe.Read(p) - if cs.bytesRemain != -1 { - if int64(n) > cs.bytesRemain { - n = int(cs.bytesRemain) - if err == nil { - err = errors.New("net/http: server replied with more than declared Content-Length; truncated") - cc.writeStreamReset(cs.ID, ErrCodeProtocol, err) - } - cs.readErr = err - return int(cs.bytesRemain), err - } - cs.bytesRemain -= int64(n) - if err == io.EOF && cs.bytesRemain > 0 { - err = io.ErrUnexpectedEOF - cs.readErr = err - return n, err - } - } - if n == 0 { - // No flow control tokens to send back. - return - } - - cc.mu.Lock() - defer cc.mu.Unlock() - - var connAdd, streamAdd int32 - // Check the conn-level first, before the stream-level. - if v := cc.inflow.available(); v < transportDefaultConnFlow/2 { - connAdd = transportDefaultConnFlow - v - cc.inflow.add(connAdd) - } - if err == nil { // No need to refresh if the stream is over or failed. - // Consider any buffered body data (read from the conn but not - // consumed by the client) when computing flow control for this - // stream. 
- v := int(cs.inflow.available()) + cs.bufPipe.Len() - if v < transportDefaultStreamFlow-transportDefaultStreamMinRefresh { - streamAdd = int32(transportDefaultStreamFlow - v) - cs.inflow.add(streamAdd) - } - } - if connAdd != 0 || streamAdd != 0 { - cc.wmu.Lock() - defer cc.wmu.Unlock() - if connAdd != 0 { - cc.fr.WriteWindowUpdate(0, mustUint31(connAdd)) - } - if streamAdd != 0 { - cc.fr.WriteWindowUpdate(cs.ID, mustUint31(streamAdd)) - } - cc.bw.Flush() - } - return -} - -var errClosedResponseBody = errors.New("http2: response body closed") - -func (b transportResponseBody) Close() error { - cs := b.cs - cc := cs.cc - - serverSentStreamEnd := cs.bufPipe.Err() == io.EOF - unread := cs.bufPipe.Len() - - if unread > 0 || !serverSentStreamEnd { - cc.mu.Lock() - cc.wmu.Lock() - if !serverSentStreamEnd { - cc.fr.WriteRSTStream(cs.ID, ErrCodeCancel) - } - // Return connection-level flow control. - if unread > 0 { - cc.inflow.add(int32(unread)) - cc.fr.WriteWindowUpdate(0, uint32(unread)) - } - cc.bw.Flush() - cc.wmu.Unlock() - cc.mu.Unlock() - } - - cs.bufPipe.BreakWithError(errClosedResponseBody) - return nil -} - -func (rl *clientConnReadLoop) processData(f *DataFrame) error { - cc := rl.cc - cs := cc.streamByID(f.StreamID, f.StreamEnded()) - data := f.Data() - if cs == nil { - cc.mu.Lock() - neverSent := cc.nextStreamID - cc.mu.Unlock() - if f.StreamID >= neverSent { - // We never asked for this. - cc.logf("http2: Transport received unsolicited DATA frame; closing connection") - return ConnectionError(ErrCodeProtocol) - } - // We probably did ask for this, but canceled. Just ignore it. - // TODO: be stricter here? only silently ignore things which - // we canceled, but not things which were closed normally - // by the peer? Tough without accumulating too much state. - - // But at least return their flow control: - if f.Length > 0 { - cc.mu.Lock() - cc.inflow.add(int32(f.Length)) - cc.mu.Unlock() - - cc.wmu.Lock() - cc.fr.WriteWindowUpdate(0, uint32(f.Length)) - cc.bw.Flush() - cc.wmu.Unlock() - } - return nil - } - if f.Length > 0 { - if len(data) > 0 && cs.bufPipe.b == nil { - // Data frame after it's already closed? - cc.logf("http2: Transport received DATA frame for closed stream; closing connection") - return ConnectionError(ErrCodeProtocol) - } - - // Check connection-level flow control. - cc.mu.Lock() - if cs.inflow.available() >= int32(f.Length) { - cs.inflow.take(int32(f.Length)) - } else { - cc.mu.Unlock() - return ConnectionError(ErrCodeFlowControl) - } - // Return any padded flow control now, since we won't - // refund it later on body reads. - if pad := int32(f.Length) - int32(len(data)); pad > 0 { - cs.inflow.add(pad) - cc.inflow.add(pad) - cc.wmu.Lock() - cc.fr.WriteWindowUpdate(0, uint32(pad)) - cc.fr.WriteWindowUpdate(cs.ID, uint32(pad)) - cc.bw.Flush() - cc.wmu.Unlock() - } - cc.mu.Unlock() - - if len(data) > 0 { - if _, err := cs.bufPipe.Write(data); err != nil { - rl.endStreamError(cs, err) - return err - } - } - } - - if f.StreamEnded() { - rl.endStream(cs) - } - return nil -} - -var errInvalidTrailers = errors.New("http2: invalid trailers") - -func (rl *clientConnReadLoop) endStream(cs *clientStream) { - // TODO: check that any declared content-length matches, like - // server.go's (*stream).endStream method. 
- rl.endStreamError(cs, nil) -} - -func (rl *clientConnReadLoop) endStreamError(cs *clientStream, err error) { - var code func() - if err == nil { - err = io.EOF - code = cs.copyTrailers - } - cs.bufPipe.closeWithErrorAndCode(err, code) - delete(rl.activeRes, cs.ID) - if isConnectionCloseRequest(cs.req) { - rl.closeWhenIdle = true - } - - select { - case cs.resc <- resAndError{err: err}: - default: - } -} - -func (cs *clientStream) copyTrailers() { - for k, vv := range cs.trailer { - t := cs.resTrailer - if *t == nil { - *t = make(http.Header) - } - (*t)[k] = vv - } -} - -func (rl *clientConnReadLoop) processGoAway(f *GoAwayFrame) error { - cc := rl.cc - cc.t.connPool().MarkDead(cc) - if f.ErrCode != 0 { - // TODO: deal with GOAWAY more. particularly the error code - cc.vlogf("transport got GOAWAY with error code = %v", f.ErrCode) - } - cc.setGoAway(f) - return nil -} - -func (rl *clientConnReadLoop) processSettings(f *SettingsFrame) error { - cc := rl.cc - cc.mu.Lock() - defer cc.mu.Unlock() - - if f.IsAck() { - if cc.wantSettingsAck { - cc.wantSettingsAck = false - return nil - } - return ConnectionError(ErrCodeProtocol) - } - - err := f.ForeachSetting(func(s Setting) error { - switch s.ID { - case SettingMaxFrameSize: - cc.maxFrameSize = s.Val - case SettingMaxConcurrentStreams: - cc.maxConcurrentStreams = s.Val - case SettingInitialWindowSize: - // Values above the maximum flow-control - // window size of 2^31-1 MUST be treated as a - // connection error (Section 5.4.1) of type - // FLOW_CONTROL_ERROR. - if s.Val > math.MaxInt32 { - return ConnectionError(ErrCodeFlowControl) - } - - // Adjust flow control of currently-open - // frames by the difference of the old initial - // window size and this one. - delta := int32(s.Val) - int32(cc.initialWindowSize) - for _, cs := range cc.streams { - cs.flow.add(delta) - } - cc.cond.Broadcast() - - cc.initialWindowSize = s.Val - default: - // TODO(bradfitz): handle more settings? SETTINGS_HEADER_TABLE_SIZE probably. - cc.vlogf("Unhandled Setting: %v", s) - } - return nil - }) - if err != nil { - return err - } - - cc.wmu.Lock() - defer cc.wmu.Unlock() - - cc.fr.WriteSettingsAck() - cc.bw.Flush() - return cc.werr -} - -func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error { - cc := rl.cc - cs := cc.streamByID(f.StreamID, false) - if f.StreamID != 0 && cs == nil { - return nil - } - - cc.mu.Lock() - defer cc.mu.Unlock() - - fl := &cc.flow - if cs != nil { - fl = &cs.flow - } - if !fl.add(int32(f.Increment)) { - return ConnectionError(ErrCodeFlowControl) - } - cc.cond.Broadcast() - return nil -} - -func (rl *clientConnReadLoop) processResetStream(f *RSTStreamFrame) error { - cs := rl.cc.streamByID(f.StreamID, true) - if cs == nil { - // TODO: return error if server tries to RST_STEAM an idle stream - return nil - } - select { - case <-cs.peerReset: - // Already reset. - // This is the only goroutine - // which closes this, so there - // isn't a race. - default: - err := streamError(cs.ID, f.ErrCode) - cs.resetErr = err - close(cs.peerReset) - cs.bufPipe.CloseWithError(err) - cs.cc.cond.Broadcast() // wake up checkResetOrDone via clientStream.awaitFlowControl - } - delete(rl.activeRes, cs.ID) - return nil -} - -func (rl *clientConnReadLoop) processPing(f *PingFrame) error { - if f.IsAck() { - // 6.7 PING: " An endpoint MUST NOT respond to PING frames - // containing this flag." 
- return nil - } - cc := rl.cc - cc.wmu.Lock() - defer cc.wmu.Unlock() - if err := cc.fr.WritePing(true, f.Data); err != nil { - return err - } - return cc.bw.Flush() -} - -func (rl *clientConnReadLoop) processPushPromise(f *PushPromiseFrame) error { - // We told the peer we don't want them. - // Spec says: - // "PUSH_PROMISE MUST NOT be sent if the SETTINGS_ENABLE_PUSH - // setting of the peer endpoint is set to 0. An endpoint that - // has set this setting and has received acknowledgement MUST - // treat the receipt of a PUSH_PROMISE frame as a connection - // error (Section 5.4.1) of type PROTOCOL_ERROR." - return ConnectionError(ErrCodeProtocol) -} - -func (cc *ClientConn) writeStreamReset(streamID uint32, code ErrCode, err error) { - // TODO: map err to more interesting error codes, once the - // HTTP community comes up with some. But currently for - // RST_STREAM there's no equivalent to GOAWAY frame's debug - // data, and the error codes are all pretty vague ("cancel"). - cc.wmu.Lock() - cc.fr.WriteRSTStream(streamID, code) - cc.bw.Flush() - cc.wmu.Unlock() -} - -var ( - errResponseHeaderListSize = errors.New("http2: response header list larger than advertised limit") - errPseudoTrailers = errors.New("http2: invalid pseudo header in trailers") -) - -func (cc *ClientConn) logf(format string, args ...interface{}) { - cc.t.logf(format, args...) -} - -func (cc *ClientConn) vlogf(format string, args ...interface{}) { - cc.t.vlogf(format, args...) -} - -func (t *Transport) vlogf(format string, args ...interface{}) { - if VerboseLogs { - t.logf(format, args...) - } -} - -func (t *Transport) logf(format string, args ...interface{}) { - log.Printf(format, args...) -} - -var noBody io.ReadCloser = ioutil.NopCloser(bytes.NewReader(nil)) - -func strSliceContains(ss []string, s string) bool { - for _, v := range ss { - if v == s { - return true - } - } - return false -} - -type erringRoundTripper struct{ err error } - -func (rt erringRoundTripper) RoundTrip(*http.Request) (*http.Response, error) { return nil, rt.err } - -// gzipReader wraps a response body so it can lazily -// call gzip.NewReader on the first call to Read -type gzipReader struct { - body io.ReadCloser // underlying Response.Body - zr *gzip.Reader // lazily-initialized gzip reader - zerr error // sticky error -} - -func (gz *gzipReader) Read(p []byte) (n int, err error) { - if gz.zerr != nil { - return 0, gz.zerr - } - if gz.zr == nil { - gz.zr, err = gzip.NewReader(gz.body) - if err != nil { - gz.zerr = err - return 0, err - } - } - return gz.zr.Read(p) -} - -func (gz *gzipReader) Close() error { - return gz.body.Close() -} - -type errorReader struct{ err error } - -func (r errorReader) Read(p []byte) (int, error) { return 0, r.err } - -// bodyWriterState encapsulates various state around the Transport's writing -// of the request body, particularly regarding doing delayed writes of the body -// when the request contains "Expect: 100-continue". 
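The doc comment above describes the delayed-write dance for "Expect: 100-continue": arm a timer far in the future, re-arm it with the real timeout once the headers have gone out, and let either the timer or an early 100 Continue trigger the body write exactly once. A minimal, self-contained sketch of that timer-plus-sync.Once pattern (the names and the 200ms timeout are illustrative, not part of this package):

package main

import (
	"fmt"
	"sync"
	"time"
)

func main() {
	var once sync.Once
	writeBody := func() { fmt.Println("writing request body") }

	// Arm the timer with a huge duration: we need a handle to it before the
	// headers are written, but the real delay only starts after that.
	t := time.AfterFunc(365*24*time.Hour, func() { once.Do(writeBody) })

	// Request headers written: lower the timer to the actual timeout.
	if t.Stop() {
		t.Reset(200 * time.Millisecond)
	}

	// If a "100 Continue" arrived first we would instead do:
	//   t.Stop(); go once.Do(writeBody)

	time.Sleep(300 * time.Millisecond) // let the timer fire for the demo
}

Whichever path wins, sync.Once guarantees the body writer runs at most once.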
-type bodyWriterState struct { - cs *clientStream - timer *time.Timer // if non-nil, we're doing a delayed write - fnonce *sync.Once // to call fn with - fn func() // the code to run in the goroutine, writing the body - resc chan error // result of fn's execution - delay time.Duration // how long we should delay a delayed write for -} - -func (t *Transport) getBodyWriterState(cs *clientStream, body io.Reader) (s bodyWriterState) { - s.cs = cs - if body == nil { - return - } - resc := make(chan error, 1) - s.resc = resc - s.fn = func() { - resc <- cs.writeRequestBody(body, cs.req.Body) - } - s.delay = t.expectContinueTimeout() - if s.delay == 0 || - !httplex.HeaderValuesContainsToken( - cs.req.Header["Expect"], - "100-continue") { - return - } - s.fnonce = new(sync.Once) - - // Arm the timer with a very large duration, which we'll - // intentionally lower later. It has to be large now because - // we need a handle to it before writing the headers, but the - // s.delay value is defined to not start until after the - // request headers were written. - const hugeDuration = 365 * 24 * time.Hour - s.timer = time.AfterFunc(hugeDuration, func() { - s.fnonce.Do(s.fn) - }) - return -} - -func (s bodyWriterState) cancel() { - if s.timer != nil { - s.timer.Stop() - } -} - -func (s bodyWriterState) on100() { - if s.timer == nil { - // If we didn't do a delayed write, ignore the server's - // bogus 100 continue response. - return - } - s.timer.Stop() - go func() { s.fnonce.Do(s.fn) }() -} - -// scheduleBodyWrite starts writing the body, either immediately (in -// the common case) or after the delay timeout. It should not be -// called until after the headers have been written. -func (s bodyWriterState) scheduleBodyWrite() { - if s.timer == nil { - // We're not doing a delayed write (see - // getBodyWriterState), so just start the writing - // goroutine immediately. - go s.fn() - return - } - traceWait100Continue(s.cs.trace) - if s.timer.Stop() { - s.timer.Reset(s.delay) - } -} - -// isConnectionCloseRequest reports whether req should use its own -// connection for a single request and then close the connection. -func isConnectionCloseRequest(req *http.Request) bool { - return req.Close || httplex.HeaderValuesContainsToken(req.Header["Connection"], "close") -} diff --git a/vendor/golang.org/x/net/http2/write.go b/vendor/golang.org/x/net/http2/write.go deleted file mode 100644 index 27ef0dd4d7..0000000000 --- a/vendor/golang.org/x/net/http2/write.go +++ /dev/null @@ -1,264 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package http2 - -import ( - "bytes" - "fmt" - "log" - "net/http" - "time" - - "golang.org/x/net/http2/hpack" - "golang.org/x/net/lex/httplex" -) - -// writeFramer is implemented by any type that is used to write frames. -type writeFramer interface { - writeFrame(writeContext) error -} - -// writeContext is the interface needed by the various frame writer -// types below. All the writeFrame methods below are scheduled via the -// frame writing scheduler (see writeScheduler in writesched.go). -// -// This interface is implemented by *serverConn. -// -// TODO: decide whether to a) use this in the client code (which didn't -// end up using this yet, because it has a simpler design, not -// currently implementing priorities), or b) delete this and -// make the server code a bit more concrete. 
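Every frame the server emits goes through this one small seam: a writeFramer knows how to serialize itself, and the writeContext (declared just below) supplies the Framer, flushing, and header encoding. A stripped-down sketch of the shape of that pattern; the interface bodies here are trimmed to a single method and are not the package's real definitions:

package main

import "fmt"

// writeContext is cut down to the one method this sketch needs.
type writeContext interface {
	Flush() error
}

// writeFramer is implemented by any value that can emit its frame.
type writeFramer interface {
	writeFrame(ctx writeContext) error
}

// flushFrame plays the role of the package's flushFrameWriter below:
// its whole job is to flush buffered frames.
type flushFrame struct{}

func (flushFrame) writeFrame(ctx writeContext) error { return ctx.Flush() }

type loggingCtx struct{}

func (loggingCtx) Flush() error { fmt.Println("flushed"); return nil }

func main() {
	var w writeFramer = flushFrame{}
	if err := w.writeFrame(loggingCtx{}); err != nil {
		fmt.Println("write failed:", err)
	}
}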
-type writeContext interface { - Framer() *Framer - Flush() error - CloseConn() error - // HeaderEncoder returns an HPACK encoder that writes to the - // returned buffer. - HeaderEncoder() (*hpack.Encoder, *bytes.Buffer) -} - -// endsStream reports whether the given frame writer w will locally -// close the stream. -func endsStream(w writeFramer) bool { - switch v := w.(type) { - case *writeData: - return v.endStream - case *writeResHeaders: - return v.endStream - case nil: - // This can only happen if the caller reuses w after it's - // been intentionally nil'ed out to prevent use. Keep this - // here to catch future refactoring breaking it. - panic("endsStream called on nil writeFramer") - } - return false -} - -type flushFrameWriter struct{} - -func (flushFrameWriter) writeFrame(ctx writeContext) error { - return ctx.Flush() -} - -type writeSettings []Setting - -func (s writeSettings) writeFrame(ctx writeContext) error { - return ctx.Framer().WriteSettings([]Setting(s)...) -} - -type writeGoAway struct { - maxStreamID uint32 - code ErrCode -} - -func (p *writeGoAway) writeFrame(ctx writeContext) error { - err := ctx.Framer().WriteGoAway(p.maxStreamID, p.code, nil) - if p.code != 0 { - ctx.Flush() // ignore error: we're hanging up on them anyway - time.Sleep(50 * time.Millisecond) - ctx.CloseConn() - } - return err -} - -type writeData struct { - streamID uint32 - p []byte - endStream bool -} - -func (w *writeData) String() string { - return fmt.Sprintf("writeData(stream=%d, p=%d, endStream=%v)", w.streamID, len(w.p), w.endStream) -} - -func (w *writeData) writeFrame(ctx writeContext) error { - return ctx.Framer().WriteData(w.streamID, w.endStream, w.p) -} - -// handlerPanicRST is the message sent from handler goroutines when -// the handler panics. -type handlerPanicRST struct { - StreamID uint32 -} - -func (hp handlerPanicRST) writeFrame(ctx writeContext) error { - return ctx.Framer().WriteRSTStream(hp.StreamID, ErrCodeInternal) -} - -func (se StreamError) writeFrame(ctx writeContext) error { - return ctx.Framer().WriteRSTStream(se.StreamID, se.Code) -} - -type writePingAck struct{ pf *PingFrame } - -func (w writePingAck) writeFrame(ctx writeContext) error { - return ctx.Framer().WritePing(true, w.pf.Data) -} - -type writeSettingsAck struct{} - -func (writeSettingsAck) writeFrame(ctx writeContext) error { - return ctx.Framer().WriteSettingsAck() -} - -// writeResHeaders is a request to write a HEADERS and 0+ CONTINUATION frames -// for HTTP response headers or trailers from a server handler. -type writeResHeaders struct { - streamID uint32 - httpResCode int // 0 means no ":status" line - h http.Header // may be nil - trailers []string // if non-nil, which keys of h to write. nil means all. 
- endStream bool - - date string - contentType string - contentLength string -} - -func encKV(enc *hpack.Encoder, k, v string) { - if VerboseLogs { - log.Printf("http2: server encoding header %q = %q", k, v) - } - enc.WriteField(hpack.HeaderField{Name: k, Value: v}) -} - -func (w *writeResHeaders) writeFrame(ctx writeContext) error { - enc, buf := ctx.HeaderEncoder() - buf.Reset() - - if w.httpResCode != 0 { - encKV(enc, ":status", httpCodeString(w.httpResCode)) - } - - encodeHeaders(enc, w.h, w.trailers) - - if w.contentType != "" { - encKV(enc, "content-type", w.contentType) - } - if w.contentLength != "" { - encKV(enc, "content-length", w.contentLength) - } - if w.date != "" { - encKV(enc, "date", w.date) - } - - headerBlock := buf.Bytes() - if len(headerBlock) == 0 && w.trailers == nil { - panic("unexpected empty hpack") - } - - // For now we're lazy and just pick the minimum MAX_FRAME_SIZE - // that all peers must support (16KB). Later we could care - // more and send larger frames if the peer advertised it, but - // there's little point. Most headers are small anyway (so we - // generally won't have CONTINUATION frames), and extra frames - // only waste 9 bytes anyway. - const maxFrameSize = 16384 - - first := true - for len(headerBlock) > 0 { - frag := headerBlock - if len(frag) > maxFrameSize { - frag = frag[:maxFrameSize] - } - headerBlock = headerBlock[len(frag):] - endHeaders := len(headerBlock) == 0 - var err error - if first { - first = false - err = ctx.Framer().WriteHeaders(HeadersFrameParam{ - StreamID: w.streamID, - BlockFragment: frag, - EndStream: w.endStream, - EndHeaders: endHeaders, - }) - } else { - err = ctx.Framer().WriteContinuation(w.streamID, endHeaders, frag) - } - if err != nil { - return err - } - } - return nil -} - -type write100ContinueHeadersFrame struct { - streamID uint32 -} - -func (w write100ContinueHeadersFrame) writeFrame(ctx writeContext) error { - enc, buf := ctx.HeaderEncoder() - buf.Reset() - encKV(enc, ":status", "100") - return ctx.Framer().WriteHeaders(HeadersFrameParam{ - StreamID: w.streamID, - BlockFragment: buf.Bytes(), - EndStream: false, - EndHeaders: true, - }) -} - -type writeWindowUpdate struct { - streamID uint32 // or 0 for conn-level - n uint32 -} - -func (wu writeWindowUpdate) writeFrame(ctx writeContext) error { - return ctx.Framer().WriteWindowUpdate(wu.streamID, wu.n) -} - -func encodeHeaders(enc *hpack.Encoder, h http.Header, keys []string) { - if keys == nil { - sorter := sorterPool.Get().(*sorter) - // Using defer here, since the returned keys from the - // sorter.Keys method is only valid until the sorter - // is returned: - defer sorterPool.Put(sorter) - keys = sorter.Keys(h) - } - for _, k := range keys { - vv := h[k] - k = lowerHeader(k) - if !validWireHeaderFieldName(k) { - // Skip it as backup paranoia. Per - // golang.org/issue/14048, these should - // already be rejected at a higher level. - continue - } - isTE := k == "transfer-encoding" - for _, v := range vv { - if !httplex.ValidHeaderFieldValue(v) { - // TODO: return an error? golang.org/issue/14048 - // For now just omit it. - continue - } - // TODO: more of "8.1.2.2 Connection-Specific Header Fields" - if isTE && v != "trailers" { - continue - } - encKV(enc, k, v) - } - } -} diff --git a/vendor/golang.org/x/net/http2/writesched.go b/vendor/golang.org/x/net/http2/writesched.go deleted file mode 100644 index c24316ce7b..0000000000 --- a/vendor/golang.org/x/net/http2/writesched.go +++ /dev/null @@ -1,283 +0,0 @@ -// Copyright 2014 The Go Authors. 
All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package http2 - -import "fmt" - -// frameWriteMsg is a request to write a frame. -type frameWriteMsg struct { - // write is the interface value that does the writing, once the - // writeScheduler (below) has decided to select this frame - // to write. The write functions are all defined in write.go. - write writeFramer - - stream *stream // used for prioritization. nil for non-stream frames. - - // done, if non-nil, must be a buffered channel with space for - // 1 message and is sent the return value from write (or an - // earlier error) when the frame has been written. - done chan error -} - -// for debugging only: -func (wm frameWriteMsg) String() string { - var streamID uint32 - if wm.stream != nil { - streamID = wm.stream.id - } - var des string - if s, ok := wm.write.(fmt.Stringer); ok { - des = s.String() - } else { - des = fmt.Sprintf("%T", wm.write) - } - return fmt.Sprintf("[frameWriteMsg stream=%d, ch=%v, type: %v]", streamID, wm.done != nil, des) -} - -// writeScheduler tracks pending frames to write, priorities, and decides -// the next one to use. It is not thread-safe. -type writeScheduler struct { - // zero are frames not associated with a specific stream. - // They're sent before any stream-specific freams. - zero writeQueue - - // maxFrameSize is the maximum size of a DATA frame - // we'll write. Must be non-zero and between 16K-16M. - maxFrameSize uint32 - - // sq contains the stream-specific queues, keyed by stream ID. - // when a stream is idle, it's deleted from the map. - sq map[uint32]*writeQueue - - // canSend is a slice of memory that's reused between frame - // scheduling decisions to hold the list of writeQueues (from sq) - // which have enough flow control data to send. After canSend is - // built, the best is selected. - canSend []*writeQueue - - // pool of empty queues for reuse. - queuePool []*writeQueue -} - -func (ws *writeScheduler) putEmptyQueue(q *writeQueue) { - if len(q.s) != 0 { - panic("queue must be empty") - } - ws.queuePool = append(ws.queuePool, q) -} - -func (ws *writeScheduler) getEmptyQueue() *writeQueue { - ln := len(ws.queuePool) - if ln == 0 { - return new(writeQueue) - } - q := ws.queuePool[ln-1] - ws.queuePool = ws.queuePool[:ln-1] - return q -} - -func (ws *writeScheduler) empty() bool { return ws.zero.empty() && len(ws.sq) == 0 } - -func (ws *writeScheduler) add(wm frameWriteMsg) { - st := wm.stream - if st == nil { - ws.zero.push(wm) - } else { - ws.streamQueue(st.id).push(wm) - } -} - -func (ws *writeScheduler) streamQueue(streamID uint32) *writeQueue { - if q, ok := ws.sq[streamID]; ok { - return q - } - if ws.sq == nil { - ws.sq = make(map[uint32]*writeQueue) - } - q := ws.getEmptyQueue() - ws.sq[streamID] = q - return q -} - -// take returns the most important frame to write and removes it from the scheduler. -// It is illegal to call this if the scheduler is empty or if there are no connection-level -// flow control bytes available. -func (ws *writeScheduler) take() (wm frameWriteMsg, ok bool) { - if ws.maxFrameSize == 0 { - panic("internal error: ws.maxFrameSize not initialized or invalid") - } - - // If there any frames not associated with streams, prefer those first. - // These are usually SETTINGS, etc. - if !ws.zero.empty() { - return ws.zero.shift(), true - } - if len(ws.sq) == 0 { - return - } - - // Next, prioritize frames on streams that aren't DATA frames (no cost). 
- for id, q := range ws.sq { - if q.firstIsNoCost() { - return ws.takeFrom(id, q) - } - } - - // Now, all that remains are DATA frames with non-zero bytes to - // send. So pick the best one. - if len(ws.canSend) != 0 { - panic("should be empty") - } - for _, q := range ws.sq { - if n := ws.streamWritableBytes(q); n > 0 { - ws.canSend = append(ws.canSend, q) - } - } - if len(ws.canSend) == 0 { - return - } - defer ws.zeroCanSend() - - // TODO: find the best queue - q := ws.canSend[0] - - return ws.takeFrom(q.streamID(), q) -} - -// zeroCanSend is defered from take. -func (ws *writeScheduler) zeroCanSend() { - for i := range ws.canSend { - ws.canSend[i] = nil - } - ws.canSend = ws.canSend[:0] -} - -// streamWritableBytes returns the number of DATA bytes we could write -// from the given queue's stream, if this stream/queue were -// selected. It is an error to call this if q's head isn't a -// *writeData. -func (ws *writeScheduler) streamWritableBytes(q *writeQueue) int32 { - wm := q.head() - ret := wm.stream.flow.available() // max we can write - if ret == 0 { - return 0 - } - if int32(ws.maxFrameSize) < ret { - ret = int32(ws.maxFrameSize) - } - if ret == 0 { - panic("internal error: ws.maxFrameSize not initialized or invalid") - } - wd := wm.write.(*writeData) - if len(wd.p) < int(ret) { - ret = int32(len(wd.p)) - } - return ret -} - -func (ws *writeScheduler) takeFrom(id uint32, q *writeQueue) (wm frameWriteMsg, ok bool) { - wm = q.head() - // If the first item in this queue costs flow control tokens - // and we don't have enough, write as much as we can. - if wd, ok := wm.write.(*writeData); ok && len(wd.p) > 0 { - allowed := wm.stream.flow.available() // max we can write - if allowed == 0 { - // No quota available. Caller can try the next stream. - return frameWriteMsg{}, false - } - if int32(ws.maxFrameSize) < allowed { - allowed = int32(ws.maxFrameSize) - } - // TODO: further restrict the allowed size, because even if - // the peer says it's okay to write 16MB data frames, we might - // want to write smaller ones to properly weight competing - // streams' priorities. - - if len(wd.p) > int(allowed) { - wm.stream.flow.take(allowed) - chunk := wd.p[:allowed] - wd.p = wd.p[allowed:] - // Make up a new write message of a valid size, rather - // than shifting one off the queue. - return frameWriteMsg{ - stream: wm.stream, - write: &writeData{ - streamID: wd.streamID, - p: chunk, - // even if the original had endStream set, there - // arebytes remaining because len(wd.p) > allowed, - // so we know endStream is false: - endStream: false, - }, - // our caller is blocking on the final DATA frame, not - // these intermediates, so no need to wait: - done: nil, - }, true - } - wm.stream.flow.take(int32(len(wd.p))) - } - - q.shift() - if q.empty() { - ws.putEmptyQueue(q) - delete(ws.sq, id) - } - return wm, true -} - -func (ws *writeScheduler) forgetStream(id uint32) { - q, ok := ws.sq[id] - if !ok { - return - } - delete(ws.sq, id) - - // But keep it for others later. - for i := range q.s { - q.s[i] = frameWriteMsg{} - } - q.s = q.s[:0] - ws.putEmptyQueue(q) -} - -type writeQueue struct { - s []frameWriteMsg -} - -// streamID returns the stream ID for a non-empty stream-specific queue. -func (q *writeQueue) streamID() uint32 { return q.s[0].stream.id } - -func (q *writeQueue) empty() bool { return len(q.s) == 0 } - -func (q *writeQueue) push(wm frameWriteMsg) { - q.s = append(q.s, wm) -} - -// head returns the next item that would be removed by shift. 
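The per-stream queues above are plain slices used as FIFOs; the shift method just below copies the tail down and zeroes the vacated slot so retired frameWriteMsg values cannot pin memory. The same idiom in isolation (a sketch with string elements rather than frameWriteMsg):

package main

import "fmt"

type queue struct{ s []string }

func (q *queue) push(v string) { q.s = append(q.s, v) }

// shift removes and returns the head; calling it on an empty queue panics,
// just as the real writeQueue does on misuse.
func (q *queue) shift() string {
	v := q.s[0]
	copy(q.s, q.s[1:])
	q.s[len(q.s)-1] = "" // clear the vacated slot so it can't keep data alive
	q.s = q.s[:len(q.s)-1]
	return v
}

func main() {
	q := &queue{}
	q.push("SETTINGS")
	q.push("DATA")
	fmt.Println(q.shift(), q.shift()) // SETTINGS DATA
}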
-func (q *writeQueue) head() frameWriteMsg { - if len(q.s) == 0 { - panic("invalid use of queue") - } - return q.s[0] -} - -func (q *writeQueue) shift() frameWriteMsg { - if len(q.s) == 0 { - panic("invalid use of queue") - } - wm := q.s[0] - // TODO: less copy-happy queue. - copy(q.s, q.s[1:]) - q.s[len(q.s)-1] = frameWriteMsg{} - q.s = q.s[:len(q.s)-1] - return wm -} - -func (q *writeQueue) firstIsNoCost() bool { - if df, ok := q.s[0].write.(*writeData); ok { - return len(df.p) == 0 - } - return true -} diff --git a/vendor/golang.org/x/net/lex/httplex/httplex.go b/vendor/golang.org/x/net/lex/httplex/httplex.go deleted file mode 100644 index bd0ec24f44..0000000000 --- a/vendor/golang.org/x/net/lex/httplex/httplex.go +++ /dev/null @@ -1,312 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package httplex contains rules around lexical matters of various -// HTTP-related specifications. -// -// This package is shared by the standard library (which vendors it) -// and x/net/http2. It comes with no API stability promise. -package httplex - -import ( - "strings" - "unicode/utf8" -) - -var isTokenTable = [127]bool{ - '!': true, - '#': true, - '$': true, - '%': true, - '&': true, - '\'': true, - '*': true, - '+': true, - '-': true, - '.': true, - '0': true, - '1': true, - '2': true, - '3': true, - '4': true, - '5': true, - '6': true, - '7': true, - '8': true, - '9': true, - 'A': true, - 'B': true, - 'C': true, - 'D': true, - 'E': true, - 'F': true, - 'G': true, - 'H': true, - 'I': true, - 'J': true, - 'K': true, - 'L': true, - 'M': true, - 'N': true, - 'O': true, - 'P': true, - 'Q': true, - 'R': true, - 'S': true, - 'T': true, - 'U': true, - 'W': true, - 'V': true, - 'X': true, - 'Y': true, - 'Z': true, - '^': true, - '_': true, - '`': true, - 'a': true, - 'b': true, - 'c': true, - 'd': true, - 'e': true, - 'f': true, - 'g': true, - 'h': true, - 'i': true, - 'j': true, - 'k': true, - 'l': true, - 'm': true, - 'n': true, - 'o': true, - 'p': true, - 'q': true, - 'r': true, - 's': true, - 't': true, - 'u': true, - 'v': true, - 'w': true, - 'x': true, - 'y': true, - 'z': true, - '|': true, - '~': true, -} - -func IsTokenRune(r rune) bool { - i := int(r) - return i < len(isTokenTable) && isTokenTable[i] -} - -func isNotToken(r rune) bool { - return !IsTokenRune(r) -} - -// HeaderValuesContainsToken reports whether any string in values -// contains the provided token, ASCII case-insensitively. -func HeaderValuesContainsToken(values []string, token string) bool { - for _, v := range values { - if headerValueContainsToken(v, token) { - return true - } - } - return false -} - -// isOWS reports whether b is an optional whitespace byte, as defined -// by RFC 7230 section 3.2.3. -func isOWS(b byte) bool { return b == ' ' || b == '\t' } - -// trimOWS returns x with all optional whitespace removes from the -// beginning and end. -func trimOWS(x string) string { - // TODO: consider using strings.Trim(x, " \t") instead, - // if and when it's fast enough. See issue 10292. - // But this ASCII-only code will probably always beat UTF-8 - // aware code. 
- for len(x) > 0 && isOWS(x[0]) { - x = x[1:] - } - for len(x) > 0 && isOWS(x[len(x)-1]) { - x = x[:len(x)-1] - } - return x -} - -// headerValueContainsToken reports whether v (assumed to be a -// 0#element, in the ABNF extension described in RFC 7230 section 7) -// contains token amongst its comma-separated tokens, ASCII -// case-insensitively. -func headerValueContainsToken(v string, token string) bool { - v = trimOWS(v) - if comma := strings.IndexByte(v, ','); comma != -1 { - return tokenEqual(trimOWS(v[:comma]), token) || headerValueContainsToken(v[comma+1:], token) - } - return tokenEqual(v, token) -} - -// lowerASCII returns the ASCII lowercase version of b. -func lowerASCII(b byte) byte { - if 'A' <= b && b <= 'Z' { - return b + ('a' - 'A') - } - return b -} - -// tokenEqual reports whether t1 and t2 are equal, ASCII case-insensitively. -func tokenEqual(t1, t2 string) bool { - if len(t1) != len(t2) { - return false - } - for i, b := range t1 { - if b >= utf8.RuneSelf { - // No UTF-8 or non-ASCII allowed in tokens. - return false - } - if lowerASCII(byte(b)) != lowerASCII(t2[i]) { - return false - } - } - return true -} - -// isLWS reports whether b is linear white space, according -// to http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2 -// LWS = [CRLF] 1*( SP | HT ) -func isLWS(b byte) bool { return b == ' ' || b == '\t' } - -// isCTL reports whether b is a control byte, according -// to http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2 -// CTL = -func isCTL(b byte) bool { - const del = 0x7f // a CTL - return b < ' ' || b == del -} - -// ValidHeaderFieldName reports whether v is a valid HTTP/1.x header name. -// HTTP/2 imposes the additional restriction that uppercase ASCII -// letters are not allowed. -// -// RFC 7230 says: -// header-field = field-name ":" OWS field-value OWS -// field-name = token -// token = 1*tchar -// tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*" / "+" / "-" / "." / -// "^" / "_" / "`" / "|" / "~" / DIGIT / ALPHA -func ValidHeaderFieldName(v string) bool { - if len(v) == 0 { - return false - } - for _, r := range v { - if !IsTokenRune(r) { - return false - } - } - return true -} - -// ValidHostHeader reports whether h is a valid host header. -func ValidHostHeader(h string) bool { - // The latest spec is actually this: - // - // http://tools.ietf.org/html/rfc7230#section-5.4 - // Host = uri-host [ ":" port ] - // - // Where uri-host is: - // http://tools.ietf.org/html/rfc3986#section-3.2.2 - // - // But we're going to be much more lenient for now and just - // search for any byte that's not a valid byte in any of those - // expressions. - for i := 0; i < len(h); i++ { - if !validHostByte[h[i]] { - return false - } - } - return true -} - -// See the validHostHeader comment. 
-var validHostByte = [256]bool{ - '0': true, '1': true, '2': true, '3': true, '4': true, '5': true, '6': true, '7': true, - '8': true, '9': true, - - 'a': true, 'b': true, 'c': true, 'd': true, 'e': true, 'f': true, 'g': true, 'h': true, - 'i': true, 'j': true, 'k': true, 'l': true, 'm': true, 'n': true, 'o': true, 'p': true, - 'q': true, 'r': true, 's': true, 't': true, 'u': true, 'v': true, 'w': true, 'x': true, - 'y': true, 'z': true, - - 'A': true, 'B': true, 'C': true, 'D': true, 'E': true, 'F': true, 'G': true, 'H': true, - 'I': true, 'J': true, 'K': true, 'L': true, 'M': true, 'N': true, 'O': true, 'P': true, - 'Q': true, 'R': true, 'S': true, 'T': true, 'U': true, 'V': true, 'W': true, 'X': true, - 'Y': true, 'Z': true, - - '!': true, // sub-delims - '$': true, // sub-delims - '%': true, // pct-encoded (and used in IPv6 zones) - '&': true, // sub-delims - '(': true, // sub-delims - ')': true, // sub-delims - '*': true, // sub-delims - '+': true, // sub-delims - ',': true, // sub-delims - '-': true, // unreserved - '.': true, // unreserved - ':': true, // IPv6address + Host expression's optional port - ';': true, // sub-delims - '=': true, // sub-delims - '[': true, - '\'': true, // sub-delims - ']': true, - '_': true, // unreserved - '~': true, // unreserved -} - -// ValidHeaderFieldValue reports whether v is a valid "field-value" according to -// http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2 : -// -// message-header = field-name ":" [ field-value ] -// field-value = *( field-content | LWS ) -// field-content = -// -// http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2 : -// -// TEXT = -// LWS = [CRLF] 1*( SP | HT ) -// CTL = -// -// RFC 7230 says: -// field-value = *( field-content / obs-fold ) -// obj-fold = N/A to http2, and deprecated -// field-content = field-vchar [ 1*( SP / HTAB ) field-vchar ] -// field-vchar = VCHAR / obs-text -// obs-text = %x80-FF -// VCHAR = "any visible [USASCII] character" -// -// http2 further says: "Similarly, HTTP/2 allows header field values -// that are not valid. While most of the values that can be encoded -// will not alter header field parsing, carriage return (CR, ASCII -// 0xd), line feed (LF, ASCII 0xa), and the zero character (NUL, ASCII -// 0x0) might be exploited by an attacker if they are translated -// verbatim. Any request or response that contains a character not -// permitted in a header field value MUST be treated as malformed -// (Section 8.1.2.6). Valid characters are defined by the -// field-content ABNF rule in Section 3.2 of [RFC7230]." -// -// This function does not (yet?) properly handle the rejection of -// strings that begin or end with SP or HTAB. -func ValidHeaderFieldValue(v string) bool { - for i := 0; i < len(v); i++ { - b := v[i] - if isCTL(b) && !isLWS(b) { - return false - } - } - return true -} diff --git a/vendor/golang.org/x/net/trace/events.go b/vendor/golang.org/x/net/trace/events.go deleted file mode 100644 index e66c7e3282..0000000000 --- a/vendor/golang.org/x/net/trace/events.go +++ /dev/null @@ -1,524 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
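The httplex helpers above are all the header hygiene the HTTP/2 code relies on. A short usage sketch of its exported functions, assuming the package is importable at golang.org/x/net/lex/httplex as vendored here:

package main

import (
	"fmt"

	"golang.org/x/net/lex/httplex"
)

func main() {
	// Field names must be RFC 7230 tokens.
	fmt.Println(httplex.ValidHeaderFieldName("Content-Type")) // true
	fmt.Println(httplex.ValidHeaderFieldName("Bad Header"))   // false: space is not a tchar

	// Field values may not contain control bytes other than tab.
	fmt.Println(httplex.ValidHeaderFieldValue("ok value"))     // true
	fmt.Println(httplex.ValidHeaderFieldValue("bad\r\nvalue")) // false: CR and LF are rejected

	// Host headers are checked against a permissive byte table.
	fmt.Println(httplex.ValidHostHeader("example.com:443")) // true

	// Token matching in list-valued headers is ASCII case-insensitive.
	fmt.Println(httplex.HeaderValuesContainsToken([]string{"keep-alive, Upgrade"}, "upgrade")) // true
}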
- -package trace - -import ( - "bytes" - "fmt" - "html/template" - "io" - "log" - "net/http" - "runtime" - "sort" - "strconv" - "strings" - "sync" - "sync/atomic" - "text/tabwriter" - "time" -) - -var eventsTmpl = template.Must(template.New("events").Funcs(template.FuncMap{ - "elapsed": elapsed, - "trimSpace": strings.TrimSpace, -}).Parse(eventsHTML)) - -const maxEventsPerLog = 100 - -type bucket struct { - MaxErrAge time.Duration - String string -} - -var buckets = []bucket{ - {0, "total"}, - {10 * time.Second, "errs<10s"}, - {1 * time.Minute, "errs<1m"}, - {10 * time.Minute, "errs<10m"}, - {1 * time.Hour, "errs<1h"}, - {10 * time.Hour, "errs<10h"}, - {24000 * time.Hour, "errors"}, -} - -// RenderEvents renders the HTML page typically served at /debug/events. -// It does not do any auth checking; see AuthRequest for the default auth check -// used by the handler registered on http.DefaultServeMux. -// req may be nil. -func RenderEvents(w http.ResponseWriter, req *http.Request, sensitive bool) { - now := time.Now() - data := &struct { - Families []string // family names - Buckets []bucket - Counts [][]int // eventLog count per family/bucket - - // Set when a bucket has been selected. - Family string - Bucket int - EventLogs eventLogs - Expanded bool - }{ - Buckets: buckets, - } - - data.Families = make([]string, 0, len(families)) - famMu.RLock() - for name := range families { - data.Families = append(data.Families, name) - } - famMu.RUnlock() - sort.Strings(data.Families) - - // Count the number of eventLogs in each family for each error age. - data.Counts = make([][]int, len(data.Families)) - for i, name := range data.Families { - // TODO(sameer): move this loop under the family lock. - f := getEventFamily(name) - data.Counts[i] = make([]int, len(data.Buckets)) - for j, b := range data.Buckets { - data.Counts[i][j] = f.Count(now, b.MaxErrAge) - } - } - - if req != nil { - var ok bool - data.Family, data.Bucket, ok = parseEventsArgs(req) - if !ok { - // No-op - } else { - data.EventLogs = getEventFamily(data.Family).Copy(now, buckets[data.Bucket].MaxErrAge) - } - if data.EventLogs != nil { - defer data.EventLogs.Free() - sort.Sort(data.EventLogs) - } - if exp, err := strconv.ParseBool(req.FormValue("exp")); err == nil { - data.Expanded = exp - } - } - - famMu.RLock() - defer famMu.RUnlock() - if err := eventsTmpl.Execute(w, data); err != nil { - log.Printf("net/trace: Failed executing template: %v", err) - } -} - -func parseEventsArgs(req *http.Request) (fam string, b int, ok bool) { - fam, bStr := req.FormValue("fam"), req.FormValue("b") - if fam == "" || bStr == "" { - return "", 0, false - } - b, err := strconv.Atoi(bStr) - if err != nil || b < 0 || b >= len(buckets) { - return "", 0, false - } - return fam, b, true -} - -// An EventLog provides a log of events associated with a specific object. -type EventLog interface { - // Printf formats its arguments with fmt.Sprintf and adds the - // result to the event log. - Printf(format string, a ...interface{}) - - // Errorf is like Printf, but it marks this event as an error. - Errorf(format string, a ...interface{}) - - // Finish declares that this event log is complete. - // The event log should not be used after calling this method. - Finish() -} - -// NewEventLog returns a new EventLog with the specified family name -// and title. 
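A typical lifetime for one of these event logs, attached to a long-lived object: create it with NewEventLog, record Printf and Errorf entries as work happens, and call Finish when the object goes away. A hedged sketch (the worker type and family name are invented for illustration; the import path is the vendored golang.org/x/net/trace):

package main

import (
	"errors"

	"golang.org/x/net/trace"
)

type worker struct {
	events trace.EventLog
}

func newWorker(name string) *worker {
	return &worker{events: trace.NewEventLog("example.Worker", name)}
}

func (w *worker) handle(job string) {
	if job == "" {
		// Errorf marks the entry as an error, which feeds the errs<10s/1m/... buckets.
		w.events.Errorf("rejected job: %v", errors.New("empty job name"))
		return
	}
	w.events.Printf("handled %q", job)
}

func main() {
	w := newWorker("worker-1")
	w.handle("job-a")
	w.handle("")
	w.events.Finish() // the log must not be used after Finish
}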
-func NewEventLog(family, title string) EventLog { - el := newEventLog() - el.ref() - el.Family, el.Title = family, title - el.Start = time.Now() - el.events = make([]logEntry, 0, maxEventsPerLog) - el.stack = make([]uintptr, 32) - n := runtime.Callers(2, el.stack) - el.stack = el.stack[:n] - - getEventFamily(family).add(el) - return el -} - -func (el *eventLog) Finish() { - getEventFamily(el.Family).remove(el) - el.unref() // matches ref in New -} - -var ( - famMu sync.RWMutex - families = make(map[string]*eventFamily) // family name => family -) - -func getEventFamily(fam string) *eventFamily { - famMu.Lock() - defer famMu.Unlock() - f := families[fam] - if f == nil { - f = &eventFamily{} - families[fam] = f - } - return f -} - -type eventFamily struct { - mu sync.RWMutex - eventLogs eventLogs -} - -func (f *eventFamily) add(el *eventLog) { - f.mu.Lock() - f.eventLogs = append(f.eventLogs, el) - f.mu.Unlock() -} - -func (f *eventFamily) remove(el *eventLog) { - f.mu.Lock() - defer f.mu.Unlock() - for i, el0 := range f.eventLogs { - if el == el0 { - copy(f.eventLogs[i:], f.eventLogs[i+1:]) - f.eventLogs = f.eventLogs[:len(f.eventLogs)-1] - return - } - } -} - -func (f *eventFamily) Count(now time.Time, maxErrAge time.Duration) (n int) { - f.mu.RLock() - defer f.mu.RUnlock() - for _, el := range f.eventLogs { - if el.hasRecentError(now, maxErrAge) { - n++ - } - } - return -} - -func (f *eventFamily) Copy(now time.Time, maxErrAge time.Duration) (els eventLogs) { - f.mu.RLock() - defer f.mu.RUnlock() - els = make(eventLogs, 0, len(f.eventLogs)) - for _, el := range f.eventLogs { - if el.hasRecentError(now, maxErrAge) { - el.ref() - els = append(els, el) - } - } - return -} - -type eventLogs []*eventLog - -// Free calls unref on each element of the list. -func (els eventLogs) Free() { - for _, el := range els { - el.unref() - } -} - -// eventLogs may be sorted in reverse chronological order. -func (els eventLogs) Len() int { return len(els) } -func (els eventLogs) Less(i, j int) bool { return els[i].Start.After(els[j].Start) } -func (els eventLogs) Swap(i, j int) { els[i], els[j] = els[j], els[i] } - -// A logEntry is a timestamped log entry in an event log. -type logEntry struct { - When time.Time - Elapsed time.Duration // since previous event in log - NewDay bool // whether this event is on a different day to the previous event - What string - IsErr bool -} - -// WhenString returns a string representation of the elapsed time of the event. -// It will include the date if midnight was crossed. -func (e logEntry) WhenString() string { - if e.NewDay { - return e.When.Format("2006/01/02 15:04:05.000000") - } - return e.When.Format("15:04:05.000000") -} - -// An eventLog represents an active event log. -type eventLog struct { - // Family is the top-level grouping of event logs to which this belongs. - Family string - - // Title is the title of this event log. - Title string - - // Timing information. - Start time.Time - - // Call stack where this event log was created. - stack []uintptr - - // Append-only sequence of events. - // - // TODO(sameer): change this to a ring buffer to avoid the array copy - // when we hit maxEventsPerLog. - mu sync.RWMutex - events []logEntry - LastErrorTime time.Time - discarded int - - refs int32 // how many buckets this is in -} - -func (el *eventLog) reset() { - // Clear all but the mutex. Mutexes may not be copied, even when unlocked. 
- el.Family = "" - el.Title = "" - el.Start = time.Time{} - el.stack = nil - el.events = nil - el.LastErrorTime = time.Time{} - el.discarded = 0 - el.refs = 0 -} - -func (el *eventLog) hasRecentError(now time.Time, maxErrAge time.Duration) bool { - if maxErrAge == 0 { - return true - } - el.mu.RLock() - defer el.mu.RUnlock() - return now.Sub(el.LastErrorTime) < maxErrAge -} - -// delta returns the elapsed time since the last event or the log start, -// and whether it spans midnight. -// L >= el.mu -func (el *eventLog) delta(t time.Time) (time.Duration, bool) { - if len(el.events) == 0 { - return t.Sub(el.Start), false - } - prev := el.events[len(el.events)-1].When - return t.Sub(prev), prev.Day() != t.Day() - -} - -func (el *eventLog) Printf(format string, a ...interface{}) { - el.printf(false, format, a...) -} - -func (el *eventLog) Errorf(format string, a ...interface{}) { - el.printf(true, format, a...) -} - -func (el *eventLog) printf(isErr bool, format string, a ...interface{}) { - e := logEntry{When: time.Now(), IsErr: isErr, What: fmt.Sprintf(format, a...)} - el.mu.Lock() - e.Elapsed, e.NewDay = el.delta(e.When) - if len(el.events) < maxEventsPerLog { - el.events = append(el.events, e) - } else { - // Discard the oldest event. - if el.discarded == 0 { - // el.discarded starts at two to count for the event it - // is replacing, plus the next one that we are about to - // drop. - el.discarded = 2 - } else { - el.discarded++ - } - // TODO(sameer): if this causes allocations on a critical path, - // change eventLog.What to be a fmt.Stringer, as in trace.go. - el.events[0].What = fmt.Sprintf("(%d events discarded)", el.discarded) - // The timestamp of the discarded meta-event should be - // the time of the last event it is representing. - el.events[0].When = el.events[1].When - copy(el.events[1:], el.events[2:]) - el.events[maxEventsPerLog-1] = e - } - if e.IsErr { - el.LastErrorTime = e.When - } - el.mu.Unlock() -} - -func (el *eventLog) ref() { - atomic.AddInt32(&el.refs, 1) -} - -func (el *eventLog) unref() { - if atomic.AddInt32(&el.refs, -1) == 0 { - freeEventLog(el) - } -} - -func (el *eventLog) When() string { - return el.Start.Format("2006/01/02 15:04:05.000000") -} - -func (el *eventLog) ElapsedTime() string { - elapsed := time.Since(el.Start) - return fmt.Sprintf("%.6f", elapsed.Seconds()) -} - -func (el *eventLog) Stack() string { - buf := new(bytes.Buffer) - tw := tabwriter.NewWriter(buf, 1, 8, 1, '\t', 0) - printStackRecord(tw, el.stack) - tw.Flush() - return buf.String() -} - -// printStackRecord prints the function + source line information -// for a single stack trace. -// Adapted from runtime/pprof/pprof.go. -func printStackRecord(w io.Writer, stk []uintptr) { - for _, pc := range stk { - f := runtime.FuncForPC(pc) - if f == nil { - continue - } - file, line := f.FileLine(pc) - name := f.Name() - // Hide runtime.goexit and any runtime functions at the beginning. - if strings.HasPrefix(name, "runtime.") { - continue - } - fmt.Fprintf(w, "# %s\t%s:%d\n", name, file, line) - } -} - -func (el *eventLog) Events() []logEntry { - el.mu.RLock() - defer el.mu.RUnlock() - return el.events -} - -// freeEventLogs is a freelist of *eventLog -var freeEventLogs = make(chan *eventLog, 1000) - -// newEventLog returns a event log ready to use. -func newEventLog() *eventLog { - select { - case el := <-freeEventLogs: - return el - default: - return new(eventLog) - } -} - -// freeEventLog adds el to freeEventLogs if there's room. -// This is non-blocking. 
-func freeEventLog(el *eventLog) { - el.reset() - select { - case freeEventLogs <- el: - default: - } -} - -const eventsHTML = ` - - - events - - - - -

/debug/events

- - - {{range $i, $fam := .Families}} - - - - {{range $j, $bucket := $.Buckets}} - {{$n := index $.Counts $i $j}} - - {{end}} - - {{end}} -
{{$fam}} - {{if $n}}{{end}} - [{{$n}} {{$bucket.String}}] - {{if $n}}{{end}} -
- -{{if $.EventLogs}} -
-

Family: {{$.Family}}

- -{{if $.Expanded}}{{end}} -[Summary]{{if $.Expanded}}{{end}} - -{{if not $.Expanded}}{{end}} -[Expanded]{{if not $.Expanded}}{{end}} - - - - {{range $el := $.EventLogs}} - - - - - {{if $.Expanded}} - - - - - - {{range $el.Events}} - - - - - - {{end}} - {{end}} - {{end}} -
WhenElapsed
{{$el.When}}{{$el.ElapsedTime}}{{$el.Title}} -
{{$el.Stack|trimSpace}}
{{.WhenString}}{{elapsed .Elapsed}}.{{if .IsErr}}E{{else}}.{{end}}. {{.What}}
-{{end}} - - -` diff --git a/vendor/golang.org/x/net/trace/histogram.go b/vendor/golang.org/x/net/trace/histogram.go deleted file mode 100644 index bb42aa5320..0000000000 --- a/vendor/golang.org/x/net/trace/histogram.go +++ /dev/null @@ -1,356 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package trace - -// This file implements histogramming for RPC statistics collection. - -import ( - "bytes" - "fmt" - "html/template" - "log" - "math" - - "golang.org/x/net/internal/timeseries" -) - -const ( - bucketCount = 38 -) - -// histogram keeps counts of values in buckets that are spaced -// out in powers of 2: 0-1, 2-3, 4-7... -// histogram implements timeseries.Observable -type histogram struct { - sum int64 // running total of measurements - sumOfSquares float64 // square of running total - buckets []int64 // bucketed values for histogram - value int // holds a single value as an optimization - valueCount int64 // number of values recorded for single value -} - -// AddMeasurement records a value measurement observation to the histogram. -func (h *histogram) addMeasurement(value int64) { - // TODO: assert invariant - h.sum += value - h.sumOfSquares += float64(value) * float64(value) - - bucketIndex := getBucket(value) - - if h.valueCount == 0 || (h.valueCount > 0 && h.value == bucketIndex) { - h.value = bucketIndex - h.valueCount++ - } else { - h.allocateBuckets() - h.buckets[bucketIndex]++ - } -} - -func (h *histogram) allocateBuckets() { - if h.buckets == nil { - h.buckets = make([]int64, bucketCount) - h.buckets[h.value] = h.valueCount - h.value = 0 - h.valueCount = -1 - } -} - -func log2(i int64) int { - n := 0 - for ; i >= 0x100; i >>= 8 { - n += 8 - } - for ; i > 0; i >>= 1 { - n += 1 - } - return n -} - -func getBucket(i int64) (index int) { - index = log2(i) - 1 - if index < 0 { - index = 0 - } - if index >= bucketCount { - index = bucketCount - 1 - } - return -} - -// Total returns the number of recorded observations. -func (h *histogram) total() (total int64) { - if h.valueCount >= 0 { - total = h.valueCount - } - for _, val := range h.buckets { - total += int64(val) - } - return -} - -// Average returns the average value of recorded observations. -func (h *histogram) average() float64 { - t := h.total() - if t == 0 { - return 0 - } - return float64(h.sum) / float64(t) -} - -// Variance returns the variance of recorded observations. -func (h *histogram) variance() float64 { - t := float64(h.total()) - if t == 0 { - return 0 - } - s := float64(h.sum) / t - return h.sumOfSquares/t - s*s -} - -// StandardDeviation returns the standard deviation of recorded observations. -func (h *histogram) standardDeviation() float64 { - return math.Sqrt(h.variance()) -} - -// PercentileBoundary estimates the value that the given fraction of recorded -// observations are less than. -func (h *histogram) percentileBoundary(percentile float64) int64 { - total := h.total() - - // Corner cases (make sure result is strictly less than Total()) - if total == 0 { - return 0 - } else if total == 1 { - return int64(h.average()) - } - - percentOfTotal := round(float64(total) * percentile) - var runningTotal int64 - - for i := range h.buckets { - value := h.buckets[i] - runningTotal += value - if runningTotal == percentOfTotal { - // We hit an exact bucket boundary. If the next bucket has data, it is a - // good estimate of the value. 
If the bucket is empty, we interpolate the - // midpoint between the next bucket's boundary and the next non-zero - // bucket. If the remaining buckets are all empty, then we use the - // boundary for the next bucket as the estimate. - j := uint8(i + 1) - min := bucketBoundary(j) - if runningTotal < total { - for h.buckets[j] == 0 { - j++ - } - } - max := bucketBoundary(j) - return min + round(float64(max-min)/2) - } else if runningTotal > percentOfTotal { - // The value is in this bucket. Interpolate the value. - delta := runningTotal - percentOfTotal - percentBucket := float64(value-delta) / float64(value) - bucketMin := bucketBoundary(uint8(i)) - nextBucketMin := bucketBoundary(uint8(i + 1)) - bucketSize := nextBucketMin - bucketMin - return bucketMin + round(percentBucket*float64(bucketSize)) - } - } - return bucketBoundary(bucketCount - 1) -} - -// Median returns the estimated median of the observed values. -func (h *histogram) median() int64 { - return h.percentileBoundary(0.5) -} - -// Add adds other to h. -func (h *histogram) Add(other timeseries.Observable) { - o := other.(*histogram) - if o.valueCount == 0 { - // Other histogram is empty - } else if h.valueCount >= 0 && o.valueCount > 0 && h.value == o.value { - // Both have a single bucketed value, aggregate them - h.valueCount += o.valueCount - } else { - // Two different values necessitate buckets in this histogram - h.allocateBuckets() - if o.valueCount >= 0 { - h.buckets[o.value] += o.valueCount - } else { - for i := range h.buckets { - h.buckets[i] += o.buckets[i] - } - } - } - h.sumOfSquares += o.sumOfSquares - h.sum += o.sum -} - -// Clear resets the histogram to an empty state, removing all observed values. -func (h *histogram) Clear() { - h.buckets = nil - h.value = 0 - h.valueCount = 0 - h.sum = 0 - h.sumOfSquares = 0 -} - -// CopyFrom copies from other, which must be a *histogram, into h. -func (h *histogram) CopyFrom(other timeseries.Observable) { - o := other.(*histogram) - if o.valueCount == -1 { - h.allocateBuckets() - copy(h.buckets, o.buckets) - } - h.sum = o.sum - h.sumOfSquares = o.sumOfSquares - h.value = o.value - h.valueCount = o.valueCount -} - -// Multiply scales the histogram by the specified ratio. -func (h *histogram) Multiply(ratio float64) { - if h.valueCount == -1 { - for i := range h.buckets { - h.buckets[i] = int64(float64(h.buckets[i]) * ratio) - } - } else { - h.valueCount = int64(float64(h.valueCount) * ratio) - } - h.sum = int64(float64(h.sum) * ratio) - h.sumOfSquares = h.sumOfSquares * ratio -} - -// New creates a new histogram. -func (h *histogram) New() timeseries.Observable { - r := new(histogram) - r.Clear() - return r -} - -func (h *histogram) String() string { - return fmt.Sprintf("%d, %f, %d, %d, %v", - h.sum, h.sumOfSquares, h.value, h.valueCount, h.buckets) -} - -// round returns the closest int64 to the argument -func round(in float64) int64 { - return int64(math.Floor(in + 0.5)) -} - -// bucketBoundary returns the first value in the bucket. -func bucketBoundary(bucket uint8) int64 { - if bucket == 0 { - return 0 - } - return 1 << bucket -} - -// bucketData holds data about a specific bucket for use in distTmpl. -type bucketData struct { - Lower, Upper int64 - N int64 - Pct, CumulativePct float64 - GraphWidth int -} - -// data holds data about a Distribution for use in distTmpl. -type data struct { - Buckets []*bucketData - Count, Median int64 - Mean, StandardDeviation float64 -} - -// maxHTMLBarWidth is the maximum width of the HTML bar for visualizing buckets. 
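The histogram above spaces its 38 buckets in powers of two: an observation lands in bucket floor(log2(v)), clamped to [0, 37], and bucketBoundary(i) recovers the lower edge as 1<<i (bucket 0 starts at 0). A quick standalone check of that mapping, reusing the same log2 and getBucket logic:

package main

import "fmt"

const bucketCount = 38

// log2 returns the bit length of i, i.e. one more than floor(log2(i)) for i > 0.
func log2(i int64) int {
	n := 0
	for ; i >= 0x100; i >>= 8 {
		n += 8
	}
	for ; i > 0; i >>= 1 {
		n++
	}
	return n
}

// getBucket maps a value to its power-of-two bucket: 0-1, 2-3, 4-7, 8-15, ...
func getBucket(v int64) int {
	index := log2(v) - 1
	if index < 0 {
		index = 0
	}
	if index >= bucketCount {
		index = bucketCount - 1
	}
	return index
}

func main() {
	for _, v := range []int64{0, 1, 2, 3, 4, 7, 8, 1000} {
		fmt.Printf("value %4d -> bucket %d\n", v, getBucket(v))
	}
	// 0 and 1 -> bucket 0; 2,3 -> 1; 4..7 -> 2; 8 -> 3; 1000 -> 9
}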
-const maxHTMLBarWidth = 350.0 - -// newData returns data representing h for use in distTmpl. -func (h *histogram) newData() *data { - // Force the allocation of buckets to simplify the rendering implementation - h.allocateBuckets() - // We scale the bars on the right so that the largest bar is - // maxHTMLBarWidth pixels in width. - maxBucket := int64(0) - for _, n := range h.buckets { - if n > maxBucket { - maxBucket = n - } - } - total := h.total() - barsizeMult := maxHTMLBarWidth / float64(maxBucket) - var pctMult float64 - if total == 0 { - pctMult = 1.0 - } else { - pctMult = 100.0 / float64(total) - } - - buckets := make([]*bucketData, len(h.buckets)) - runningTotal := int64(0) - for i, n := range h.buckets { - if n == 0 { - continue - } - runningTotal += n - var upperBound int64 - if i < bucketCount-1 { - upperBound = bucketBoundary(uint8(i + 1)) - } else { - upperBound = math.MaxInt64 - } - buckets[i] = &bucketData{ - Lower: bucketBoundary(uint8(i)), - Upper: upperBound, - N: n, - Pct: float64(n) * pctMult, - CumulativePct: float64(runningTotal) * pctMult, - GraphWidth: int(float64(n) * barsizeMult), - } - } - return &data{ - Buckets: buckets, - Count: total, - Median: h.median(), - Mean: h.average(), - StandardDeviation: h.standardDeviation(), - } -} - -func (h *histogram) html() template.HTML { - buf := new(bytes.Buffer) - if err := distTmpl.Execute(buf, h.newData()); err != nil { - buf.Reset() - log.Printf("net/trace: couldn't execute template: %v", err) - } - return template.HTML(buf.String()) -} - -// Input: data -var distTmpl = template.Must(template.New("distTmpl").Parse(` - - - - - - - -
Count: {{.Count}}Mean: {{printf "%.0f" .Mean}}StdDev: {{printf "%.0f" .StandardDeviation}}Median: {{.Median}}
-
- -{{range $b := .Buckets}} -{{if $b}} - - - - - - - - - -{{end}} -{{end}} -
[{{.Lower}},{{.Upper}}){{.N}}{{printf "%#.3f" .Pct}}%{{printf "%#.3f" .CumulativePct}}%
-`)) diff --git a/vendor/golang.org/x/net/trace/trace.go b/vendor/golang.org/x/net/trace/trace.go deleted file mode 100644 index d860fccf9b..0000000000 --- a/vendor/golang.org/x/net/trace/trace.go +++ /dev/null @@ -1,1063 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -Package trace implements tracing of requests and long-lived objects. -It exports HTTP interfaces on /debug/requests and /debug/events. - -A trace.Trace provides tracing for short-lived objects, usually requests. -A request handler might be implemented like this: - - func fooHandler(w http.ResponseWriter, req *http.Request) { - tr := trace.New("mypkg.Foo", req.URL.Path) - defer tr.Finish() - ... - tr.LazyPrintf("some event %q happened", str) - ... - if err := somethingImportant(); err != nil { - tr.LazyPrintf("somethingImportant failed: %v", err) - tr.SetError() - } - } - -The /debug/requests HTTP endpoint organizes the traces by family, -errors, and duration. It also provides histogram of request duration -for each family. - -A trace.EventLog provides tracing for long-lived objects, such as RPC -connections. - - // A Fetcher fetches URL paths for a single domain. - type Fetcher struct { - domain string - events trace.EventLog - } - - func NewFetcher(domain string) *Fetcher { - return &Fetcher{ - domain, - trace.NewEventLog("mypkg.Fetcher", domain), - } - } - - func (f *Fetcher) Fetch(path string) (string, error) { - resp, err := http.Get("http://" + f.domain + "/" + path) - if err != nil { - f.events.Errorf("Get(%q) = %v", path, err) - return "", err - } - f.events.Printf("Get(%q) = %s", path, resp.Status) - ... - } - - func (f *Fetcher) Close() error { - f.events.Finish() - return nil - } - -The /debug/events HTTP endpoint organizes the event logs by family and -by time since the last error. The expanded view displays recent log -entries and the log's call stack. -*/ -package trace // import "golang.org/x/net/trace" - -import ( - "bytes" - "fmt" - "html/template" - "io" - "log" - "net" - "net/http" - "runtime" - "sort" - "strconv" - "sync" - "sync/atomic" - "time" - - "golang.org/x/net/context" - "golang.org/x/net/internal/timeseries" -) - -// DebugUseAfterFinish controls whether to debug uses of Trace values after finishing. -// FOR DEBUGGING ONLY. This will slow down the program. -var DebugUseAfterFinish = false - -// AuthRequest determines whether a specific request is permitted to load the -// /debug/requests or /debug/events pages. -// -// It returns two bools; the first indicates whether the page may be viewed at all, -// and the second indicates whether sensitive events will be shown. -// -// AuthRequest may be replaced by a program to customise its authorisation requirements. -// -// The default AuthRequest function returns (true, true) if and only if the request -// comes from localhost/127.0.0.1/[::1]. -var AuthRequest = func(req *http.Request) (any, sensitive bool) { - // RemoteAddr is commonly in the form "IP" or "IP:port". - // If it is in the form "IP:port", split off the port. 
- host, _, err := net.SplitHostPort(req.RemoteAddr) - if err != nil { - host = req.RemoteAddr - } - switch host { - case "localhost", "127.0.0.1", "::1": - return true, true - default: - return false, false - } -} - -func init() { - http.HandleFunc("/debug/requests", func(w http.ResponseWriter, req *http.Request) { - any, sensitive := AuthRequest(req) - if !any { - http.Error(w, "not allowed", http.StatusUnauthorized) - return - } - w.Header().Set("Content-Type", "text/html; charset=utf-8") - Render(w, req, sensitive) - }) - http.HandleFunc("/debug/events", func(w http.ResponseWriter, req *http.Request) { - any, sensitive := AuthRequest(req) - if !any { - http.Error(w, "not allowed", http.StatusUnauthorized) - return - } - w.Header().Set("Content-Type", "text/html; charset=utf-8") - RenderEvents(w, req, sensitive) - }) -} - -// Render renders the HTML page typically served at /debug/requests. -// It does not do any auth checking; see AuthRequest for the default auth check -// used by the handler registered on http.DefaultServeMux. -// req may be nil. -func Render(w io.Writer, req *http.Request, sensitive bool) { - data := &struct { - Families []string - ActiveTraceCount map[string]int - CompletedTraces map[string]*family - - // Set when a bucket has been selected. - Traces traceList - Family string - Bucket int - Expanded bool - Traced bool - Active bool - ShowSensitive bool // whether to show sensitive events - - Histogram template.HTML - HistogramWindow string // e.g. "last minute", "last hour", "all time" - - // If non-zero, the set of traces is a partial set, - // and this is the total number. - Total int - }{ - CompletedTraces: completedTraces, - } - - data.ShowSensitive = sensitive - if req != nil { - // Allow show_sensitive=0 to force hiding of sensitive data for testing. - // This only goes one way; you can't use show_sensitive=1 to see things. - if req.FormValue("show_sensitive") == "0" { - data.ShowSensitive = false - } - - if exp, err := strconv.ParseBool(req.FormValue("exp")); err == nil { - data.Expanded = exp - } - if exp, err := strconv.ParseBool(req.FormValue("rtraced")); err == nil { - data.Traced = exp - } - } - - completedMu.RLock() - data.Families = make([]string, 0, len(completedTraces)) - for fam := range completedTraces { - data.Families = append(data.Families, fam) - } - completedMu.RUnlock() - sort.Strings(data.Families) - - // We are careful here to minimize the time spent locking activeMu, - // since that lock is required every time an RPC starts and finishes. 
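Since the init function above registers both pages on http.DefaultServeMux and gates them with AuthRequest, a program that wants different access rules only has to replace the AuthRequest variable. A minimal sketch, with a deliberately permissive hypothetical policy and an illustrative listen address:

```go
package main

import (
	"log"
	"net/http"

	"golang.org/x/net/trace"
)

func main() {
	// Hypothetical policy: anyone may view /debug/requests and /debug/events,
	// but events logged as sensitive are never shown.
	trace.AuthRequest = func(req *http.Request) (any, sensitive bool) {
		return true, false
	}
	// The package's init has already registered the handlers on DefaultServeMux.
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```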
- data.ActiveTraceCount = make(map[string]int, len(data.Families)) - activeMu.RLock() - for fam, s := range activeTraces { - data.ActiveTraceCount[fam] = s.Len() - } - activeMu.RUnlock() - - var ok bool - data.Family, data.Bucket, ok = parseArgs(req) - switch { - case !ok: - // No-op - case data.Bucket == -1: - data.Active = true - n := data.ActiveTraceCount[data.Family] - data.Traces = getActiveTraces(data.Family) - if len(data.Traces) < n { - data.Total = n - } - case data.Bucket < bucketsPerFamily: - if b := lookupBucket(data.Family, data.Bucket); b != nil { - data.Traces = b.Copy(data.Traced) - } - default: - if f := getFamily(data.Family, false); f != nil { - var obs timeseries.Observable - f.LatencyMu.RLock() - switch o := data.Bucket - bucketsPerFamily; o { - case 0: - obs = f.Latency.Minute() - data.HistogramWindow = "last minute" - case 1: - obs = f.Latency.Hour() - data.HistogramWindow = "last hour" - case 2: - obs = f.Latency.Total() - data.HistogramWindow = "all time" - } - f.LatencyMu.RUnlock() - if obs != nil { - data.Histogram = obs.(*histogram).html() - } - } - } - - if data.Traces != nil { - defer data.Traces.Free() - sort.Sort(data.Traces) - } - - completedMu.RLock() - defer completedMu.RUnlock() - if err := pageTmpl.ExecuteTemplate(w, "Page", data); err != nil { - log.Printf("net/trace: Failed executing template: %v", err) - } -} - -func parseArgs(req *http.Request) (fam string, b int, ok bool) { - if req == nil { - return "", 0, false - } - fam, bStr := req.FormValue("fam"), req.FormValue("b") - if fam == "" || bStr == "" { - return "", 0, false - } - b, err := strconv.Atoi(bStr) - if err != nil || b < -1 { - return "", 0, false - } - - return fam, b, true -} - -func lookupBucket(fam string, b int) *traceBucket { - f := getFamily(fam, false) - if f == nil || b < 0 || b >= len(f.Buckets) { - return nil - } - return f.Buckets[b] -} - -type contextKeyT string - -var contextKey = contextKeyT("golang.org/x/net/trace.Trace") - -// NewContext returns a copy of the parent context -// and associates it with a Trace. -func NewContext(ctx context.Context, tr Trace) context.Context { - return context.WithValue(ctx, contextKey, tr) -} - -// FromContext returns the Trace bound to the context, if any. -func FromContext(ctx context.Context) (tr Trace, ok bool) { - tr, ok = ctx.Value(contextKey).(Trace) - return -} - -// Trace represents an active request. -type Trace interface { - // LazyLog adds x to the event log. It will be evaluated each time the - // /debug/requests page is rendered. Any memory referenced by x will be - // pinned until the trace is finished and later discarded. - LazyLog(x fmt.Stringer, sensitive bool) - - // LazyPrintf evaluates its arguments with fmt.Sprintf each time the - // /debug/requests page is rendered. Any memory referenced by a will be - // pinned until the trace is finished and later discarded. - LazyPrintf(format string, a ...interface{}) - - // SetError declares that this trace resulted in an error. - SetError() - - // SetRecycler sets a recycler for the trace. - // f will be called for each event passed to LazyLog at a time when - // it is no longer required, whether while the trace is still active - // and the event is discarded, or when a completed trace is discarded. - SetRecycler(f func(interface{})) - - // SetTraceInfo sets the trace info for the trace. - // This is currently unused. - SetTraceInfo(traceID, spanID uint64) - - // SetMaxEvents sets the maximum number of events that will be stored - // in the trace. 
This has no effect if any events have already been - // added to the trace. - SetMaxEvents(m int) - - // Finish declares that this trace is complete. - // The trace should not be used after calling this method. - Finish() -} - -type lazySprintf struct { - format string - a []interface{} -} - -func (l *lazySprintf) String() string { - return fmt.Sprintf(l.format, l.a...) -} - -// New returns a new Trace with the specified family and title. -func New(family, title string) Trace { - tr := newTrace() - tr.ref() - tr.Family, tr.Title = family, title - tr.Start = time.Now() - tr.events = make([]event, 0, maxEventsPerTrace) - - activeMu.RLock() - s := activeTraces[tr.Family] - activeMu.RUnlock() - if s == nil { - activeMu.Lock() - s = activeTraces[tr.Family] // check again - if s == nil { - s = new(traceSet) - activeTraces[tr.Family] = s - } - activeMu.Unlock() - } - s.Add(tr) - - // Trigger allocation of the completed trace structure for this family. - // This will cause the family to be present in the request page during - // the first trace of this family. We don't care about the return value, - // nor is there any need for this to run inline, so we execute it in its - // own goroutine, but only if the family isn't allocated yet. - completedMu.RLock() - if _, ok := completedTraces[tr.Family]; !ok { - go allocFamily(tr.Family) - } - completedMu.RUnlock() - - return tr -} - -func (tr *trace) Finish() { - tr.Elapsed = time.Now().Sub(tr.Start) - if DebugUseAfterFinish { - buf := make([]byte, 4<<10) // 4 KB should be enough - n := runtime.Stack(buf, false) - tr.finishStack = buf[:n] - } - - activeMu.RLock() - m := activeTraces[tr.Family] - activeMu.RUnlock() - m.Remove(tr) - - f := getFamily(tr.Family, true) - for _, b := range f.Buckets { - if b.Cond.match(tr) { - b.Add(tr) - } - } - // Add a sample of elapsed time as microseconds to the family's timeseries - h := new(histogram) - h.addMeasurement(tr.Elapsed.Nanoseconds() / 1e3) - f.LatencyMu.Lock() - f.Latency.Add(h) - f.LatencyMu.Unlock() - - tr.unref() // matches ref in New -} - -const ( - bucketsPerFamily = 9 - tracesPerBucket = 10 - maxActiveTraces = 20 // Maximum number of active traces to show. - maxEventsPerTrace = 10 - numHistogramBuckets = 38 -) - -var ( - // The active traces. - activeMu sync.RWMutex - activeTraces = make(map[string]*traceSet) // family -> traces - - // Families of completed traces. - completedMu sync.RWMutex - completedTraces = make(map[string]*family) // family -> traces -) - -type traceSet struct { - mu sync.RWMutex - m map[*trace]bool - - // We could avoid the entire map scan in FirstN by having a slice of all the traces - // ordered by start time, and an index into that from the trace struct, with a periodic - // repack of the slice after enough traces finish; we could also use a skip list or similar. - // However, that would shift some of the expense from /debug/requests time to RPC time, - // which is probably the wrong trade-off. -} - -func (ts *traceSet) Len() int { - ts.mu.RLock() - defer ts.mu.RUnlock() - return len(ts.m) -} - -func (ts *traceSet) Add(tr *trace) { - ts.mu.Lock() - if ts.m == nil { - ts.m = make(map[*trace]bool) - } - ts.m[tr] = true - ts.mu.Unlock() -} - -func (ts *traceSet) Remove(tr *trace) { - ts.mu.Lock() - delete(ts.m, tr) - ts.mu.Unlock() -} - -// FirstN returns the first n traces ordered by time. 
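NewContext and FromContext above let code far below a handler annotate the request's Trace without passing it around explicitly. A short sketch under that assumption; the handler, family name, and doWork helper are illustrative:

```go
package mypkg

import (
	"net/http"

	"golang.org/x/net/context"
	"golang.org/x/net/trace"
)

func barHandler(w http.ResponseWriter, req *http.Request) {
	tr := trace.New("mypkg.Bar", req.URL.Path)
	defer tr.Finish()
	// Attach the trace to a context and hand it down the call stack.
	doWork(trace.NewContext(context.Background(), tr))
}

func doWork(ctx context.Context) {
	// Any layer that has the context can pull the trace back out.
	if tr, ok := trace.FromContext(ctx); ok {
		tr.LazyPrintf("doWork started")
	}
}
```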
-func (ts *traceSet) FirstN(n int) traceList { - ts.mu.RLock() - defer ts.mu.RUnlock() - - if n > len(ts.m) { - n = len(ts.m) - } - trl := make(traceList, 0, n) - - // Fast path for when no selectivity is needed. - if n == len(ts.m) { - for tr := range ts.m { - tr.ref() - trl = append(trl, tr) - } - sort.Sort(trl) - return trl - } - - // Pick the oldest n traces. - // This is inefficient. See the comment in the traceSet struct. - for tr := range ts.m { - // Put the first n traces into trl in the order they occur. - // When we have n, sort trl, and thereafter maintain its order. - if len(trl) < n { - tr.ref() - trl = append(trl, tr) - if len(trl) == n { - // This is guaranteed to happen exactly once during this loop. - sort.Sort(trl) - } - continue - } - if tr.Start.After(trl[n-1].Start) { - continue - } - - // Find where to insert this one. - tr.ref() - i := sort.Search(n, func(i int) bool { return trl[i].Start.After(tr.Start) }) - trl[n-1].unref() - copy(trl[i+1:], trl[i:]) - trl[i] = tr - } - - return trl -} - -func getActiveTraces(fam string) traceList { - activeMu.RLock() - s := activeTraces[fam] - activeMu.RUnlock() - if s == nil { - return nil - } - return s.FirstN(maxActiveTraces) -} - -func getFamily(fam string, allocNew bool) *family { - completedMu.RLock() - f := completedTraces[fam] - completedMu.RUnlock() - if f == nil && allocNew { - f = allocFamily(fam) - } - return f -} - -func allocFamily(fam string) *family { - completedMu.Lock() - defer completedMu.Unlock() - f := completedTraces[fam] - if f == nil { - f = newFamily() - completedTraces[fam] = f - } - return f -} - -// family represents a set of trace buckets and associated latency information. -type family struct { - // traces may occur in multiple buckets. - Buckets [bucketsPerFamily]*traceBucket - - // latency time series - LatencyMu sync.RWMutex - Latency *timeseries.MinuteHourSeries -} - -func newFamily() *family { - return &family{ - Buckets: [bucketsPerFamily]*traceBucket{ - {Cond: minCond(0)}, - {Cond: minCond(50 * time.Millisecond)}, - {Cond: minCond(100 * time.Millisecond)}, - {Cond: minCond(200 * time.Millisecond)}, - {Cond: minCond(500 * time.Millisecond)}, - {Cond: minCond(1 * time.Second)}, - {Cond: minCond(10 * time.Second)}, - {Cond: minCond(100 * time.Second)}, - {Cond: errorCond{}}, - }, - Latency: timeseries.NewMinuteHourSeries(func() timeseries.Observable { return new(histogram) }), - } -} - -// traceBucket represents a size-capped bucket of historic traces, -// along with a condition for a trace to belong to the bucket. -type traceBucket struct { - Cond cond - - // Ring buffer implementation of a fixed-size FIFO queue. - mu sync.RWMutex - buf [tracesPerBucket]*trace - start int // < tracesPerBucket - length int // <= tracesPerBucket -} - -func (b *traceBucket) Add(tr *trace) { - b.mu.Lock() - defer b.mu.Unlock() - - i := b.start + b.length - if i >= tracesPerBucket { - i -= tracesPerBucket - } - if b.length == tracesPerBucket { - // "Remove" an element from the bucket. - b.buf[i].unref() - b.start++ - if b.start == tracesPerBucket { - b.start = 0 - } - } - b.buf[i] = tr - if b.length < tracesPerBucket { - b.length++ - } - tr.ref() -} - -// Copy returns a copy of the traces in the bucket. -// If tracedOnly is true, only the traces with trace information will be returned. -// The logs will be ref'd before returning; the caller should call -// the Free method when it is done with them. -// TODO(dsymonds): keep track of traced requests in separate buckets. 
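Because Finish adds a trace to every bucket whose condition matches, one slow request appears under each latency threshold it crossed. A small fragment, written as if inside this package with a hypothetical 250ms trace, showing which of newFamily's conditions it satisfies:

```go
// A 250ms trace with no error matches the ≥0s, ≥0.05s, ≥0.1s and ≥0.2s
// buckets, but not ≥0.5s and above, and not the "errors" bucket.
tr := &trace{Elapsed: 250 * time.Millisecond}
for _, b := range newFamily().Buckets {
	fmt.Printf("%-8s matched: %v\n", b.Cond, b.Cond.match(tr))
}
```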
-func (b *traceBucket) Copy(tracedOnly bool) traceList { - b.mu.RLock() - defer b.mu.RUnlock() - - trl := make(traceList, 0, b.length) - for i, x := 0, b.start; i < b.length; i++ { - tr := b.buf[x] - if !tracedOnly || tr.spanID != 0 { - tr.ref() - trl = append(trl, tr) - } - x++ - if x == b.length { - x = 0 - } - } - return trl -} - -func (b *traceBucket) Empty() bool { - b.mu.RLock() - defer b.mu.RUnlock() - return b.length == 0 -} - -// cond represents a condition on a trace. -type cond interface { - match(t *trace) bool - String() string -} - -type minCond time.Duration - -func (m minCond) match(t *trace) bool { return t.Elapsed >= time.Duration(m) } -func (m minCond) String() string { return fmt.Sprintf("≥%gs", time.Duration(m).Seconds()) } - -type errorCond struct{} - -func (e errorCond) match(t *trace) bool { return t.IsError } -func (e errorCond) String() string { return "errors" } - -type traceList []*trace - -// Free calls unref on each element of the list. -func (trl traceList) Free() { - for _, t := range trl { - t.unref() - } -} - -// traceList may be sorted in reverse chronological order. -func (trl traceList) Len() int { return len(trl) } -func (trl traceList) Less(i, j int) bool { return trl[i].Start.After(trl[j].Start) } -func (trl traceList) Swap(i, j int) { trl[i], trl[j] = trl[j], trl[i] } - -// An event is a timestamped log entry in a trace. -type event struct { - When time.Time - Elapsed time.Duration // since previous event in trace - NewDay bool // whether this event is on a different day to the previous event - Recyclable bool // whether this event was passed via LazyLog - What interface{} // string or fmt.Stringer - Sensitive bool // whether this event contains sensitive information -} - -// WhenString returns a string representation of the elapsed time of the event. -// It will include the date if midnight was crossed. -func (e event) WhenString() string { - if e.NewDay { - return e.When.Format("2006/01/02 15:04:05.000000") - } - return e.When.Format("15:04:05.000000") -} - -// discarded represents a number of discarded events. -// It is stored as *discarded to make it easier to update in-place. -type discarded int - -func (d *discarded) String() string { - return fmt.Sprintf("(%d events discarded)", int(*d)) -} - -// trace represents an active or complete request, -// either sent or received by this program. -type trace struct { - // Family is the top-level grouping of traces to which this belongs. - Family string - - // Title is the title of this trace. - Title string - - // Timing information. - Start time.Time - Elapsed time.Duration // zero while active - - // Trace information if non-zero. - traceID uint64 - spanID uint64 - - // Whether this trace resulted in an error. - IsError bool - - // Append-only sequence of events (modulo discards). - mu sync.RWMutex - events []event - - refs int32 // how many buckets this is in - recycler func(interface{}) - disc discarded // scratch space to avoid allocation - - finishStack []byte // where finish was called, if DebugUseAfterFinish is set -} - -func (tr *trace) reset() { - // Clear all but the mutex. Mutexes may not be copied, even when unlocked. - tr.Family = "" - tr.Title = "" - tr.Start = time.Time{} - tr.Elapsed = 0 - tr.traceID = 0 - tr.spanID = 0 - tr.IsError = false - tr.events = nil - tr.refs = 0 - tr.recycler = nil - tr.disc = 0 - tr.finishStack = nil -} - -// delta returns the elapsed time since the last event or the trace start, -// and whether it spans midnight. 
-// L >= tr.mu -func (tr *trace) delta(t time.Time) (time.Duration, bool) { - if len(tr.events) == 0 { - return t.Sub(tr.Start), false - } - prev := tr.events[len(tr.events)-1].When - return t.Sub(prev), prev.Day() != t.Day() -} - -func (tr *trace) addEvent(x interface{}, recyclable, sensitive bool) { - if DebugUseAfterFinish && tr.finishStack != nil { - buf := make([]byte, 4<<10) // 4 KB should be enough - n := runtime.Stack(buf, false) - log.Printf("net/trace: trace used after finish:\nFinished at:\n%s\nUsed at:\n%s", tr.finishStack, buf[:n]) - } - - /* - NOTE TO DEBUGGERS - - If you are here because your program panicked in this code, - it is almost definitely the fault of code using this package, - and very unlikely to be the fault of this code. - - The most likely scenario is that some code elsewhere is using - a requestz.Trace after its Finish method is called. - You can temporarily set the DebugUseAfterFinish var - to help discover where that is; do not leave that var set, - since it makes this package much less efficient. - */ - - e := event{When: time.Now(), What: x, Recyclable: recyclable, Sensitive: sensitive} - tr.mu.Lock() - e.Elapsed, e.NewDay = tr.delta(e.When) - if len(tr.events) < cap(tr.events) { - tr.events = append(tr.events, e) - } else { - // Discard the middle events. - di := int((cap(tr.events) - 1) / 2) - if d, ok := tr.events[di].What.(*discarded); ok { - (*d)++ - } else { - // disc starts at two to count for the event it is replacing, - // plus the next one that we are about to drop. - tr.disc = 2 - if tr.recycler != nil && tr.events[di].Recyclable { - go tr.recycler(tr.events[di].What) - } - tr.events[di].What = &tr.disc - } - // The timestamp of the discarded meta-event should be - // the time of the last event it is representing. - tr.events[di].When = tr.events[di+1].When - - if tr.recycler != nil && tr.events[di+1].Recyclable { - go tr.recycler(tr.events[di+1].What) - } - copy(tr.events[di+1:], tr.events[di+2:]) - tr.events[cap(tr.events)-1] = e - } - tr.mu.Unlock() -} - -func (tr *trace) LazyLog(x fmt.Stringer, sensitive bool) { - tr.addEvent(x, true, sensitive) -} - -func (tr *trace) LazyPrintf(format string, a ...interface{}) { - tr.addEvent(&lazySprintf{format, a}, false, false) -} - -func (tr *trace) SetError() { tr.IsError = true } - -func (tr *trace) SetRecycler(f func(interface{})) { - tr.recycler = f -} - -func (tr *trace) SetTraceInfo(traceID, spanID uint64) { - tr.traceID, tr.spanID = traceID, spanID -} - -func (tr *trace) SetMaxEvents(m int) { - // Always keep at least three events: first, discarded count, last. - if len(tr.events) == 0 && m > 3 { - tr.events = make([]event, 0, m) - } -} - -func (tr *trace) ref() { - atomic.AddInt32(&tr.refs, 1) -} - -func (tr *trace) unref() { - if atomic.AddInt32(&tr.refs, -1) == 0 { - if tr.recycler != nil { - // freeTrace clears tr, so we hold tr.recycler and tr.events here. - go func(f func(interface{}), es []event) { - for _, e := range es { - if e.Recyclable { - f(e.What) - } - } - }(tr.recycler, tr.events) - } - - freeTrace(tr) - } -} - -func (tr *trace) When() string { - return tr.Start.Format("2006/01/02 15:04:05.000000") -} - -func (tr *trace) ElapsedTime() string { - t := tr.Elapsed - if t == 0 { - // Active trace. - t = time.Since(tr.Start) - } - return fmt.Sprintf("%.6f", t.Seconds()) -} - -func (tr *trace) Events() []event { - tr.mu.RLock() - defer tr.mu.RUnlock() - return tr.events -} - -var traceFreeList = make(chan *trace, 1000) // TODO(dsymonds): Use sync.Pool? 
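The buffered channel above is a bounded free list: the newTrace and freeTrace functions that follow do a non-blocking receive or send, and fall back to a fresh allocation, or to the garbage collector, when the channel is empty or full. A generic sketch of the same pattern; the buffer type, capacity, and reset step are illustrative (the TODO above notes sync.Pool as the usual alternative):

```go
package main

import "fmt"

type buffer struct{ data []byte }

// The channel capacity bounds how many idle buffers are kept around.
var freeList = make(chan *buffer, 100)

func newBuffer() *buffer {
	select {
	case b := <-freeList:
		return b // reuse a previously freed buffer
	default:
		return &buffer{data: make([]byte, 0, 4096)} // free list empty: allocate
	}
}

func freeBuffer(b *buffer) {
	b.data = b.data[:0] // reset before reuse
	select {
	case freeList <- b:
	default: // free list full: drop b and let the GC collect it
	}
}

func main() {
	b := newBuffer()
	b.data = append(b.data, "hello"...)
	freeBuffer(b)
	fmt.Println(len(freeList)) // 1
}
```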
- -// newTrace returns a trace ready to use. -func newTrace() *trace { - select { - case tr := <-traceFreeList: - return tr - default: - return new(trace) - } -} - -// freeTrace adds tr to traceFreeList if there's room. -// This is non-blocking. -func freeTrace(tr *trace) { - if DebugUseAfterFinish { - return // never reuse - } - tr.reset() - select { - case traceFreeList <- tr: - default: - } -} - -func elapsed(d time.Duration) string { - b := []byte(fmt.Sprintf("%.6f", d.Seconds())) - - // For subsecond durations, blank all zeros before decimal point, - // and all zeros between the decimal point and the first non-zero digit. - if d < time.Second { - dot := bytes.IndexByte(b, '.') - for i := 0; i < dot; i++ { - b[i] = ' ' - } - for i := dot + 1; i < len(b); i++ { - if b[i] == '0' { - b[i] = ' ' - } else { - break - } - } - } - - return string(b) -} - -var pageTmpl = template.Must(template.New("Page").Funcs(template.FuncMap{ - "elapsed": elapsed, - "add": func(a, b int) int { return a + b }, -}).Parse(pageHTML)) - -const pageHTML = ` -{{template "Prolog" .}} -{{template "StatusTable" .}} -{{template "Epilog" .}} - -{{define "Prolog"}} - - - /debug/requests - - - - -

/debug/requests

-{{end}} {{/* end of Prolog */}} - -{{define "StatusTable"}} - - {{range $fam := .Families}} - - - - {{$n := index $.ActiveTraceCount $fam}} - - - {{$f := index $.CompletedTraces $fam}} - {{range $i, $b := $f.Buckets}} - {{$empty := $b.Empty}} - - {{end}} - - {{$nb := len $f.Buckets}} - - - - - - {{end}} -
{{$fam}} - {{if $n}}{{end}} - [{{$n}} active] - {{if $n}}{{end}} - - {{if not $empty}}{{end}} - [{{.Cond}}] - {{if not $empty}}{{end}} - - [minute] - - [hour] - - [total] -
-{{end}} {{/* end of StatusTable */}} - -{{define "Epilog"}} -{{if $.Traces}} -
-

Family: {{$.Family}}

- -{{if or $.Expanded $.Traced}} - [Normal/Summary] -{{else}} - [Normal/Summary] -{{end}} - -{{if or (not $.Expanded) $.Traced}} - [Normal/Expanded] -{{else}} - [Normal/Expanded] -{{end}} - -{{if not $.Active}} - {{if or $.Expanded (not $.Traced)}} - [Traced/Summary] - {{else}} - [Traced/Summary] - {{end}} - {{if or (not $.Expanded) (not $.Traced)}} - [Traced/Expanded] - {{else}} - [Traced/Expanded] - {{end}} -{{end}} - -{{if $.Total}} -

Showing {{len $.Traces}} of {{$.Total}} traces.

-{{end}} - - - - - {{range $tr := $.Traces}} - - - - - {{/* TODO: include traceID/spanID */}} - - {{if $.Expanded}} - {{range $tr.Events}} - - - - - - {{end}} - {{end}} - {{end}} -
- {{if $.Active}}Active{{else}}Completed{{end}} Requests -
WhenElapsed (s)
{{$tr.When}}{{$tr.ElapsedTime}}{{$tr.Title}}
{{.WhenString}}{{elapsed .Elapsed}}{{if or $.ShowSensitive (not .Sensitive)}}... {{.What}}{{else}}[redacted]{{end}}
-{{end}} {{/* if $.Traces */}} - -{{if $.Histogram}} -

Latency (µs) of {{$.Family}} over {{$.HistogramWindow}}

-{{$.Histogram}} -{{end}} {{/* if $.Histogram */}} - - - -{{end}} {{/* end of Epilog */}} -` diff --git a/vendor/google.golang.org/grpc/CONTRIBUTING.md b/vendor/google.golang.org/grpc/CONTRIBUTING.md deleted file mode 100644 index 36cd6f7581..0000000000 --- a/vendor/google.golang.org/grpc/CONTRIBUTING.md +++ /dev/null @@ -1,46 +0,0 @@ -# How to contribute - -We definitely welcome patches and contribution to grpc! Here are some guidelines -and information about how to do so. - -## Sending patches - -### Getting started - -1. Check out the code: - - $ go get google.golang.org/grpc - $ cd $GOPATH/src/google.golang.org/grpc - -1. Create a fork of the grpc-go repository. -1. Add your fork as a remote: - - $ git remote add fork git@github.com:$YOURGITHUBUSERNAME/grpc-go.git - -1. Make changes, commit them. -1. Run the test suite: - - $ make test - -1. Push your changes to your fork: - - $ git push fork ... - -1. Open a pull request. - -## Legal requirements - -In order to protect both you and ourselves, you will need to sign the -[Contributor License Agreement](https://cla.developers.google.com/clas). - -## Filing Issues -When filing an issue, make sure to answer these five questions: - -1. What version of Go are you using (`go version`)? -2. What operating system and processor architecture are you using? -3. What did you do? -4. What did you expect to see? -5. What did you see instead? - -### Contributing code -Unless otherwise noted, the Go source files are distributed under the BSD-style license found in the LICENSE file. diff --git a/vendor/google.golang.org/grpc/LICENSE b/vendor/google.golang.org/grpc/LICENSE deleted file mode 100644 index f4988b4507..0000000000 --- a/vendor/google.golang.org/grpc/LICENSE +++ /dev/null @@ -1,28 +0,0 @@ -Copyright 2014, Google Inc. -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/google.golang.org/grpc/Makefile b/vendor/google.golang.org/grpc/Makefile deleted file mode 100644 index 03bb01f0b3..0000000000 --- a/vendor/google.golang.org/grpc/Makefile +++ /dev/null @@ -1,52 +0,0 @@ -all: test testrace - -deps: - go get -d -v google.golang.org/grpc/... 
- -updatedeps: - go get -d -v -u -f google.golang.org/grpc/... - -testdeps: - go get -d -v -t google.golang.org/grpc/... - -updatetestdeps: - go get -d -v -t -u -f google.golang.org/grpc/... - -build: deps - go build google.golang.org/grpc/... - -proto: - @ if ! which protoc > /dev/null; then \ - echo "error: protoc not installed" >&2; \ - exit 1; \ - fi - go get -u -v github.com/golang/protobuf/protoc-gen-go - # use $$dir as the root for all proto files in the same directory - for dir in $$(git ls-files '*.proto' | xargs -n1 dirname | uniq); do \ - protoc -I $$dir --go_out=plugins=grpc:$$dir $$dir/*.proto; \ - done - -test: testdeps - go test -v -cpu 1,4 google.golang.org/grpc/... - -testrace: testdeps - go test -v -race -cpu 1,4 google.golang.org/grpc/... - -clean: - go clean -i google.golang.org/grpc/... - -coverage: testdeps - ./coverage.sh --coveralls - -.PHONY: \ - all \ - deps \ - updatedeps \ - testdeps \ - updatetestdeps \ - build \ - proto \ - test \ - testrace \ - clean \ - coverage diff --git a/vendor/google.golang.org/grpc/PATENTS b/vendor/google.golang.org/grpc/PATENTS deleted file mode 100644 index 69b47959fa..0000000000 --- a/vendor/google.golang.org/grpc/PATENTS +++ /dev/null @@ -1,22 +0,0 @@ -Additional IP Rights Grant (Patents) - -"This implementation" means the copyrightable works distributed by -Google as part of the gRPC project. - -Google hereby grants to You a perpetual, worldwide, non-exclusive, -no-charge, royalty-free, irrevocable (except as stated in this section) -patent license to make, have made, use, offer to sell, sell, import, -transfer and otherwise run, modify and propagate the contents of this -implementation of gRPC, where such license applies only to those patent -claims, both currently owned or controlled by Google and acquired in -the future, licensable by Google that are necessarily infringed by this -implementation of gRPC. This grant does not include claims that would be -infringed only as a consequence of further modification of this -implementation. If you or your agent or exclusive licensee institute or -order or agree to the institution of patent litigation against any -entity (including a cross-claim or counterclaim in a lawsuit) alleging -that this implementation of gRPC or any code incorporated within this -implementation of gRPC constitutes direct or contributory patent -infringement, or inducement of patent infringement, then any patent -rights granted to you under this License for this implementation of gRPC -shall terminate as of the date such litigation is filed. diff --git a/vendor/google.golang.org/grpc/README.md b/vendor/google.golang.org/grpc/README.md deleted file mode 100644 index 660658bed3..0000000000 --- a/vendor/google.golang.org/grpc/README.md +++ /dev/null @@ -1,32 +0,0 @@ -#gRPC-Go - -[![Build Status](https://travis-ci.org/grpc/grpc-go.svg)](https://travis-ci.org/grpc/grpc-go) [![GoDoc](https://godoc.org/google.golang.org/grpc?status.svg)](https://godoc.org/google.golang.org/grpc) - -The Go implementation of [gRPC](http://www.grpc.io/): A high performance, open source, general RPC framework that puts mobile and HTTP/2 first. For more information see the [gRPC Quick Start](http://www.grpc.io/docs/) guide. - -Installation ------------- - -To install this package, you need to install Go and setup your Go workspace on your computer. The simplest way to install the library is to run: - -``` -$ go get google.golang.org/grpc -``` - -Prerequisites -------------- - -This requires Go 1.5 or later . 
- -Constraints ------------ -The grpc package should only depend on standard Go packages and a small number of exceptions. If your contribution introduces new dependencies which are NOT in the [list](http://godoc.org/google.golang.org/grpc?imports), you need a discussion with gRPC-Go authors and consultants. - -Documentation -------------- -See [API documentation](https://godoc.org/google.golang.org/grpc) for package and API descriptions and find examples in the [examples directory](examples/). - -Status ------- -GA - diff --git a/vendor/google.golang.org/grpc/backoff.go b/vendor/google.golang.org/grpc/backoff.go deleted file mode 100644 index 52f4f10fc2..0000000000 --- a/vendor/google.golang.org/grpc/backoff.go +++ /dev/null @@ -1,80 +0,0 @@ -package grpc - -import ( - "math/rand" - "time" -) - -// DefaultBackoffConfig uses values specified for backoff in -// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. -var ( - DefaultBackoffConfig = BackoffConfig{ - MaxDelay: 120 * time.Second, - baseDelay: 1.0 * time.Second, - factor: 1.6, - jitter: 0.2, - } -) - -// backoffStrategy defines the methodology for backing off after a grpc -// connection failure. -// -// This is unexported until the gRPC project decides whether or not to allow -// alternative backoff strategies. Once a decision is made, this type and its -// method may be exported. -type backoffStrategy interface { - // backoff returns the amount of time to wait before the next retry given - // the number of consecutive failures. - backoff(retries int) time.Duration -} - -// BackoffConfig defines the parameters for the default gRPC backoff strategy. -type BackoffConfig struct { - // MaxDelay is the upper bound of backoff delay. - MaxDelay time.Duration - - // TODO(stevvooe): The following fields are not exported, as allowing - // changes would violate the current gRPC specification for backoff. If - // gRPC decides to allow more interesting backoff strategies, these fields - // may be opened up in the future. - - // baseDelay is the amount of time to wait before retrying after the first - // failure. - baseDelay time.Duration - - // factor is applied to the backoff after each retry. - factor float64 - - // jitter provides a range to randomize backoff delays. - jitter float64 -} - -func setDefaults(bc *BackoffConfig) { - md := bc.MaxDelay - *bc = DefaultBackoffConfig - - if md > 0 { - bc.MaxDelay = md - } -} - -func (bc BackoffConfig) backoff(retries int) (t time.Duration) { - if retries == 0 { - return bc.baseDelay - } - backoff, max := float64(bc.baseDelay), float64(bc.MaxDelay) - for backoff < max && retries > 0 { - backoff *= bc.factor - retries-- - } - if backoff > max { - backoff = max - } - // Randomize backoff delays so that if a cluster of requests start at - // the same time, they won't operate in lockstep. - backoff *= 1 + bc.jitter*(rand.Float64()*2-1) - if backoff < 0 { - return 0 - } - return time.Duration(backoff) -} diff --git a/vendor/google.golang.org/grpc/balancer.go b/vendor/google.golang.org/grpc/balancer.go deleted file mode 100644 index 419e214611..0000000000 --- a/vendor/google.golang.org/grpc/balancer.go +++ /dev/null @@ -1,385 +0,0 @@ -/* - * - * Copyright 2016, Google Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. 
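With the defaults above (1s base delay, factor 1.6, 120s cap, 0.2 jitter), the pre-jitter delay after n failures is min(1s · 1.6^n, 120s), and jitter then scales it by a random factor in [0.8, 1.2]. A standalone sketch that reimplements the same arithmetic for illustration; it is not the package's exported API:

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

func backoff(retries int) time.Duration {
	const (
		baseDelay = float64(1 * time.Second)
		maxDelay  = float64(120 * time.Second)
		factor    = 1.6
		jitter    = 0.2
	)
	if retries == 0 {
		return time.Duration(baseDelay)
	}
	d := baseDelay
	for d < maxDelay && retries > 0 {
		d *= factor
		retries--
	}
	if d > maxDelay {
		d = maxDelay
	}
	d *= 1 + jitter*(rand.Float64()*2-1) // randomize by ±20% to avoid lockstep retries
	if d < 0 {
		return 0
	}
	return time.Duration(d)
}

func main() {
	for n := 0; n <= 12; n += 3 {
		fmt.Printf("after %2d failures: ~%v\n", n, backoff(n).Round(time.Millisecond))
	}
}
```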
- * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -package grpc - -import ( - "fmt" - "sync" - - "golang.org/x/net/context" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/naming" -) - -// Address represents a server the client connects to. -// This is the EXPERIMENTAL API and may be changed or extended in the future. -type Address struct { - // Addr is the server address on which a connection will be established. - Addr string - // Metadata is the information associated with Addr, which may be used - // to make load balancing decision. - Metadata interface{} -} - -// BalancerGetOptions configures a Get call. -// This is the EXPERIMENTAL API and may be changed or extended in the future. -type BalancerGetOptions struct { - // BlockingWait specifies whether Get should block when there is no - // connected address. - BlockingWait bool -} - -// Balancer chooses network addresses for RPCs. -// This is the EXPERIMENTAL API and may be changed or extended in the future. -type Balancer interface { - // Start does the initialization work to bootstrap a Balancer. For example, - // this function may start the name resolution and watch the updates. It will - // be called when dialing. - Start(target string) error - // Up informs the Balancer that gRPC has a connection to the server at - // addr. It returns down which is called once the connection to addr gets - // lost or closed. - // TODO: It is not clear how to construct and take advantage the meaningful error - // parameter for down. Need realistic demands to guide. - Up(addr Address) (down func(error)) - // Get gets the address of a server for the RPC corresponding to ctx. - // i) If it returns a connected address, gRPC internals issues the RPC on the - // connection to this address; - // ii) If it returns an address on which the connection is under construction - // (initiated by Notify(...)) but not connected, gRPC internals - // * fails RPC if the RPC is fail-fast and connection is in the TransientFailure or - // Shutdown state; - // or - // * issues RPC on the connection otherwise. - // iii) If it returns an address on which the connection does not exist, gRPC - // internals treats it as an error and will fail the corresponding RPC. - // - // Therefore, the following is the recommended rule when writing a custom Balancer. 
- // If opts.BlockingWait is true, it should return a connected address or - // block if there is no connected address. It should respect the timeout or - // cancellation of ctx when blocking. If opts.BlockingWait is false (for fail-fast - // RPCs), it should return an address it has notified via Notify(...) immediately - // instead of blocking. - // - // The function returns put which is called once the rpc has completed or failed. - // put can collect and report RPC stats to a remote load balancer. - // - // This function should only return the errors Balancer cannot recover by itself. - // gRPC internals will fail the RPC if an error is returned. - Get(ctx context.Context, opts BalancerGetOptions) (addr Address, put func(), err error) - // Notify returns a channel that is used by gRPC internals to watch the addresses - // gRPC needs to connect. The addresses might be from a name resolver or remote - // load balancer. gRPC internals will compare it with the existing connected - // addresses. If the address Balancer notified is not in the existing connected - // addresses, gRPC starts to connect the address. If an address in the existing - // connected addresses is not in the notification list, the corresponding connection - // is shutdown gracefully. Otherwise, there are no operations to take. Note that - // the Address slice must be the full list of the Addresses which should be connected. - // It is NOT delta. - Notify() <-chan []Address - // Close shuts down the balancer. - Close() error -} - -// downErr implements net.Error. It is constructed by gRPC internals and passed to the down -// call of Balancer. -type downErr struct { - timeout bool - temporary bool - desc string -} - -func (e downErr) Error() string { return e.desc } -func (e downErr) Timeout() bool { return e.timeout } -func (e downErr) Temporary() bool { return e.temporary } - -func downErrorf(timeout, temporary bool, format string, a ...interface{}) downErr { - return downErr{ - timeout: timeout, - temporary: temporary, - desc: fmt.Sprintf(format, a...), - } -} - -// RoundRobin returns a Balancer that selects addresses round-robin. It uses r to watch -// the name resolution updates and updates the addresses available correspondingly. -func RoundRobin(r naming.Resolver) Balancer { - return &roundRobin{r: r} -} - -type addrInfo struct { - addr Address - connected bool -} - -type roundRobin struct { - r naming.Resolver - w naming.Watcher - addrs []*addrInfo // all the addresses the client should potentially connect - mu sync.Mutex - addrCh chan []Address // the channel to notify gRPC internals the list of addresses the client should connect to. - next int // index of the next address to return for Get() - waitCh chan struct{} // the channel to block when there is no connected address available - done bool // The Balancer is closed. 
-} - -func (rr *roundRobin) watchAddrUpdates() error { - updates, err := rr.w.Next() - if err != nil { - grpclog.Printf("grpc: the naming watcher stops working due to %v.\n", err) - return err - } - rr.mu.Lock() - defer rr.mu.Unlock() - for _, update := range updates { - addr := Address{ - Addr: update.Addr, - Metadata: update.Metadata, - } - switch update.Op { - case naming.Add: - var exist bool - for _, v := range rr.addrs { - if addr == v.addr { - exist = true - grpclog.Println("grpc: The name resolver wanted to add an existing address: ", addr) - break - } - } - if exist { - continue - } - rr.addrs = append(rr.addrs, &addrInfo{addr: addr}) - case naming.Delete: - for i, v := range rr.addrs { - if addr == v.addr { - copy(rr.addrs[i:], rr.addrs[i+1:]) - rr.addrs = rr.addrs[:len(rr.addrs)-1] - break - } - } - default: - grpclog.Println("Unknown update.Op ", update.Op) - } - } - // Make a copy of rr.addrs and write it onto rr.addrCh so that gRPC internals gets notified. - open := make([]Address, len(rr.addrs)) - for i, v := range rr.addrs { - open[i] = v.addr - } - if rr.done { - return ErrClientConnClosing - } - rr.addrCh <- open - return nil -} - -func (rr *roundRobin) Start(target string) error { - if rr.r == nil { - // If there is no name resolver installed, it is not needed to - // do name resolution. In this case, target is added into rr.addrs - // as the only address available and rr.addrCh stays nil. - rr.addrs = append(rr.addrs, &addrInfo{addr: Address{Addr: target}}) - return nil - } - w, err := rr.r.Resolve(target) - if err != nil { - return err - } - rr.w = w - rr.addrCh = make(chan []Address) - go func() { - for { - if err := rr.watchAddrUpdates(); err != nil { - return - } - } - }() - return nil -} - -// Up sets the connected state of addr and sends notification if there are pending -// Get() calls. -func (rr *roundRobin) Up(addr Address) func(error) { - rr.mu.Lock() - defer rr.mu.Unlock() - var cnt int - for _, a := range rr.addrs { - if a.addr == addr { - if a.connected { - return nil - } - a.connected = true - } - if a.connected { - cnt++ - } - } - // addr is only one which is connected. Notify the Get() callers who are blocking. - if cnt == 1 && rr.waitCh != nil { - close(rr.waitCh) - rr.waitCh = nil - } - return func(err error) { - rr.down(addr, err) - } -} - -// down unsets the connected state of addr. -func (rr *roundRobin) down(addr Address, err error) { - rr.mu.Lock() - defer rr.mu.Unlock() - for _, a := range rr.addrs { - if addr == a.addr { - a.connected = false - break - } - } -} - -// Get returns the next addr in the rotation. -func (rr *roundRobin) Get(ctx context.Context, opts BalancerGetOptions) (addr Address, put func(), err error) { - var ch chan struct{} - rr.mu.Lock() - if rr.done { - rr.mu.Unlock() - err = ErrClientConnClosing - return - } - - if len(rr.addrs) > 0 { - if rr.next >= len(rr.addrs) { - rr.next = 0 - } - next := rr.next - for { - a := rr.addrs[next] - next = (next + 1) % len(rr.addrs) - if a.connected { - addr = a.addr - rr.next = next - rr.mu.Unlock() - return - } - if next == rr.next { - // Has iterated all the possible address but none is connected. - break - } - } - } - if !opts.BlockingWait { - if len(rr.addrs) == 0 { - rr.mu.Unlock() - err = fmt.Errorf("there is no address available") - return - } - // Returns the next addr on rr.addrs for failfast RPCs. - addr = rr.addrs[rr.next].addr - rr.next++ - rr.mu.Unlock() - return - } - // Wait on rr.waitCh for non-failfast RPCs. 
- if rr.waitCh == nil { - ch = make(chan struct{}) - rr.waitCh = ch - } else { - ch = rr.waitCh - } - rr.mu.Unlock() - for { - select { - case <-ctx.Done(): - err = ctx.Err() - return - case <-ch: - rr.mu.Lock() - if rr.done { - rr.mu.Unlock() - err = ErrClientConnClosing - return - } - - if len(rr.addrs) > 0 { - if rr.next >= len(rr.addrs) { - rr.next = 0 - } - next := rr.next - for { - a := rr.addrs[next] - next = (next + 1) % len(rr.addrs) - if a.connected { - addr = a.addr - rr.next = next - rr.mu.Unlock() - return - } - if next == rr.next { - // Has iterated all the possible address but none is connected. - break - } - } - } - // The newly added addr got removed by Down() again. - if rr.waitCh == nil { - ch = make(chan struct{}) - rr.waitCh = ch - } else { - ch = rr.waitCh - } - rr.mu.Unlock() - } - } -} - -func (rr *roundRobin) Notify() <-chan []Address { - return rr.addrCh -} - -func (rr *roundRobin) Close() error { - rr.mu.Lock() - defer rr.mu.Unlock() - rr.done = true - if rr.w != nil { - rr.w.Close() - } - if rr.waitCh != nil { - close(rr.waitCh) - rr.waitCh = nil - } - if rr.addrCh != nil { - close(rr.addrCh) - } - return nil -} diff --git a/vendor/google.golang.org/grpc/call.go b/vendor/google.golang.org/grpc/call.go deleted file mode 100644 index fea07998d7..0000000000 --- a/vendor/google.golang.org/grpc/call.go +++ /dev/null @@ -1,226 +0,0 @@ -/* - * - * Copyright 2014, Google Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -package grpc - -import ( - "bytes" - "io" - "math" - "time" - - "golang.org/x/net/context" - "golang.org/x/net/trace" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/transport" -) - -// recvResponse receives and parses an RPC response. -// On error, it returns the error and indicates whether the call should be retried. -// -// TODO(zhaoq): Check whether the received message sequence is valid. 
-func recvResponse(dopts dialOptions, t transport.ClientTransport, c *callInfo, stream *transport.Stream, reply interface{}) error { - // Try to acquire header metadata from the server if there is any. - var err error - defer func() { - if err != nil { - if _, ok := err.(transport.ConnectionError); !ok { - t.CloseStream(stream, err) - } - } - }() - c.headerMD, err = stream.Header() - if err != nil { - return err - } - p := &parser{r: stream} - for { - if err = recv(p, dopts.codec, stream, dopts.dc, reply, math.MaxInt32); err != nil { - if err == io.EOF { - break - } - return err - } - } - c.trailerMD = stream.Trailer() - return nil -} - -// sendRequest writes out various information of an RPC such as Context and Message. -func sendRequest(ctx context.Context, codec Codec, compressor Compressor, callHdr *transport.CallHdr, t transport.ClientTransport, args interface{}, opts *transport.Options) (_ *transport.Stream, err error) { - stream, err := t.NewStream(ctx, callHdr) - if err != nil { - return nil, err - } - defer func() { - if err != nil { - // If err is connection error, t will be closed, no need to close stream here. - if _, ok := err.(transport.ConnectionError); !ok { - t.CloseStream(stream, err) - } - } - }() - var cbuf *bytes.Buffer - if compressor != nil { - cbuf = new(bytes.Buffer) - } - outBuf, err := encode(codec, args, compressor, cbuf) - if err != nil { - return nil, transport.StreamErrorf(codes.Internal, "grpc: %v", err) - } - err = t.Write(stream, outBuf, opts) - // t.NewStream(...) could lead to an early rejection of the RPC (e.g., the service/method - // does not exist.) so that t.Write could get io.EOF from wait(...). Leave the following - // recvResponse to get the final status. - if err != nil && err != io.EOF { - return nil, err - } - // Sent successfully. - return stream, nil -} - -// Invoke sends the RPC request on the wire and returns after response is received. -// Invoke is called by generated code. Also users can call Invoke directly when it -// is really needed in their use cases. -func Invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) (err error) { - c := defaultCallInfo - for _, o := range opts { - if err := o.before(&c); err != nil { - return toRPCErr(err) - } - } - defer func() { - for _, o := range opts { - o.after(&c) - } - }() - if EnableTracing { - c.traceInfo.tr = trace.New("grpc.Sent."+methodFamily(method), method) - defer c.traceInfo.tr.Finish() - c.traceInfo.firstLine.client = true - if deadline, ok := ctx.Deadline(); ok { - c.traceInfo.firstLine.deadline = deadline.Sub(time.Now()) - } - c.traceInfo.tr.LazyLog(&c.traceInfo.firstLine, false) - // TODO(dsymonds): Arrange for c.traceInfo.firstLine.remoteAddr to be set. - defer func() { - if err != nil { - c.traceInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) - c.traceInfo.tr.SetError() - } - }() - } - topts := &transport.Options{ - Last: true, - Delay: false, - } - for { - var ( - err error - t transport.ClientTransport - stream *transport.Stream - // Record the put handler from Balancer.Get(...). It is called once the - // RPC has completed or failed. - put func() - ) - // TODO(zhaoq): Need a formal spec of fail-fast. 
- callHdr := &transport.CallHdr{ - Host: cc.authority, - Method: method, - } - if cc.dopts.cp != nil { - callHdr.SendCompress = cc.dopts.cp.Type() - } - gopts := BalancerGetOptions{ - BlockingWait: !c.failFast, - } - t, put, err = cc.getTransport(ctx, gopts) - if err != nil { - // TODO(zhaoq): Probably revisit the error handling. - if _, ok := err.(*rpcError); ok { - return err - } - if err == errConnClosing || err == errConnUnavailable { - if c.failFast { - return Errorf(codes.Unavailable, "%v", err) - } - continue - } - // All the other errors are treated as Internal errors. - return Errorf(codes.Internal, "%v", err) - } - if c.traceInfo.tr != nil { - c.traceInfo.tr.LazyLog(&payload{sent: true, msg: args}, true) - } - stream, err = sendRequest(ctx, cc.dopts.codec, cc.dopts.cp, callHdr, t, args, topts) - if err != nil { - if put != nil { - put() - put = nil - } - // Retry a non-failfast RPC when - // i) there is a connection error; or - // ii) the server started to drain before this RPC was initiated. - if _, ok := err.(transport.ConnectionError); ok || err == transport.ErrStreamDrain { - if c.failFast { - return toRPCErr(err) - } - continue - } - return toRPCErr(err) - } - err = recvResponse(cc.dopts, t, &c, stream, reply) - if err != nil { - if put != nil { - put() - put = nil - } - if _, ok := err.(transport.ConnectionError); ok || err == transport.ErrStreamDrain { - if c.failFast { - return toRPCErr(err) - } - continue - } - return toRPCErr(err) - } - if c.traceInfo.tr != nil { - c.traceInfo.tr.LazyLog(&payload{sent: false, msg: reply}, true) - } - t.CloseStream(stream, nil) - if put != nil { - put() - put = nil - } - return Errorf(stream.StatusCode(), "%s", stream.StatusDesc()) - } -} diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go deleted file mode 100644 index 1d3b46c605..0000000000 --- a/vendor/google.golang.org/grpc/clientconn.go +++ /dev/null @@ -1,857 +0,0 @@ -/* - * - * Copyright 2014, Google Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
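As noted above, generated code normally calls Invoke, but a client can call it directly for a unary RPC when needed. A minimal fragment; the method path and the req, resp, and conn values are hypothetical:

```go
// conn is an established *grpc.ClientConn; req and resp are generated
// protobuf messages for the (hypothetical) method below.
err := grpc.Invoke(ctx, "/helloworld.Greeter/SayHello", req, resp, conn)
```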
- * - */ - -package grpc - -import ( - "errors" - "fmt" - "net" - "strings" - "sync" - "time" - - "golang.org/x/net/context" - "golang.org/x/net/trace" - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/transport" -) - -var ( - // ErrClientConnClosing indicates that the operation is illegal because - // the ClientConn is closing. - ErrClientConnClosing = errors.New("grpc: the client connection is closing") - // ErrClientConnTimeout indicates that the ClientConn cannot establish the - // underlying connections within the specified timeout. - ErrClientConnTimeout = errors.New("grpc: timed out when dialing") - - // errNoTransportSecurity indicates that there is no transport security - // being set for ClientConn. Users should either set one or explicitly - // call WithInsecure DialOption to disable security. - errNoTransportSecurity = errors.New("grpc: no transport security set (use grpc.WithInsecure() explicitly or set credentials)") - // errTransportCredentialsMissing indicates that users want to transmit security - // information (e.g., oauth2 token) which requires secure connection on an insecure - // connection. - errTransportCredentialsMissing = errors.New("grpc: the credentials require transport level security (use grpc.WithTransportCredentials() to set)") - // errCredentialsConflict indicates that grpc.WithTransportCredentials() - // and grpc.WithInsecure() are both called for a connection. - errCredentialsConflict = errors.New("grpc: transport credentials are set for an insecure connection (grpc.WithTransportCredentials() and grpc.WithInsecure() are both called)") - // errNetworkIO indicates that the connection is down due to some network I/O error. - errNetworkIO = errors.New("grpc: failed with network I/O error") - // errConnDrain indicates that the connection starts to be drained and does not accept any new RPCs. - errConnDrain = errors.New("grpc: the connection is drained") - // errConnClosing indicates that the connection is closing. - errConnClosing = errors.New("grpc: the connection is closing") - // errConnUnavailable indicates that the connection is unavailable. - errConnUnavailable = errors.New("grpc: the connection is unavailable") - errNoAddr = errors.New("grpc: there is no address available to dial") - // minimum time to give a connection to complete - minConnectTimeout = 20 * time.Second -) - -// dialOptions configure a Dial call. dialOptions are set by the DialOption -// values passed to Dial. -type dialOptions struct { - codec Codec - cp Compressor - dc Decompressor - bs backoffStrategy - balancer Balancer - block bool - insecure bool - timeout time.Duration - copts transport.ConnectOptions -} - -// DialOption configures how we set up the connection. -type DialOption func(*dialOptions) - -// WithCodec returns a DialOption which sets a codec for message marshaling and unmarshaling. -func WithCodec(c Codec) DialOption { - return func(o *dialOptions) { - o.codec = c - } -} - -// WithCompressor returns a DialOption which sets a CompressorGenerator for generating message -// compressor. -func WithCompressor(cp Compressor) DialOption { - return func(o *dialOptions) { - o.cp = cp - } -} - -// WithDecompressor returns a DialOption which sets a DecompressorGenerator for generating -// message decompressor. -func WithDecompressor(dc Decompressor) DialOption { - return func(o *dialOptions) { - o.dc = dc - } -} - -// WithBalancer returns a DialOption which sets a load balancer. 
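WithBalancer is the hook that connects the Balancer interface and the bundled RoundRobin implementation from balancer.go to a dial. A minimal sketch, combined with the Dial entry point and WithInsecure option defined just below; the target address is illustrative and the resolver is assumed to be supplied by the caller:

```go
package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/naming"
)

// dialRoundRobin dials target, spreading RPCs across the addresses
// reported by r. With a nil resolver, RoundRobin connects to target directly.
func dialRoundRobin(target string, r naming.Resolver) (*grpc.ClientConn, error) {
	return grpc.Dial(target,
		grpc.WithInsecure(), // no transport security; required unless credentials are set
		grpc.WithBalancer(grpc.RoundRobin(r)),
	)
}

func main() {
	conn, err := dialRoundRobin("my-service:50051", nil)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}
```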
-func WithBalancer(b Balancer) DialOption { - return func(o *dialOptions) { - o.balancer = b - } -} - -// WithBackoffMaxDelay configures the dialer to use the provided maximum delay -// when backing off after failed connection attempts. -func WithBackoffMaxDelay(md time.Duration) DialOption { - return WithBackoffConfig(BackoffConfig{MaxDelay: md}) -} - -// WithBackoffConfig configures the dialer to use the provided backoff -// parameters after connection failures. -// -// Use WithBackoffMaxDelay until more parameters on BackoffConfig are opened up -// for use. -func WithBackoffConfig(b BackoffConfig) DialOption { - // Set defaults to ensure that provided BackoffConfig is valid and - // unexported fields get default values. - setDefaults(&b) - return withBackoff(b) -} - -// withBackoff sets the backoff strategy used for retries after a -// failed connection attempt. -// -// This can be exported if arbitrary backoff strategies are allowed by gRPC. -func withBackoff(bs backoffStrategy) DialOption { - return func(o *dialOptions) { - o.bs = bs - } -} - -// WithBlock returns a DialOption which makes caller of Dial blocks until the underlying -// connection is up. Without this, Dial returns immediately and connecting the server -// happens in background. -func WithBlock() DialOption { - return func(o *dialOptions) { - o.block = true - } -} - -// WithInsecure returns a DialOption which disables transport security for this ClientConn. -// Note that transport security is required unless WithInsecure is set. -func WithInsecure() DialOption { - return func(o *dialOptions) { - o.insecure = true - } -} - -// WithTransportCredentials returns a DialOption which configures a -// connection level security credentials (e.g., TLS/SSL). -func WithTransportCredentials(creds credentials.TransportCredentials) DialOption { - return func(o *dialOptions) { - o.copts.TransportCredentials = creds - } -} - -// WithPerRPCCredentials returns a DialOption which sets -// credentials which will place auth state on each outbound RPC. -func WithPerRPCCredentials(creds credentials.PerRPCCredentials) DialOption { - return func(o *dialOptions) { - o.copts.PerRPCCredentials = append(o.copts.PerRPCCredentials, creds) - } -} - -// WithTimeout returns a DialOption that configures a timeout for dialing a ClientConn -// initially. This is valid if and only if WithBlock() is present. -func WithTimeout(d time.Duration) DialOption { - return func(o *dialOptions) { - o.timeout = d - } -} - -// WithDialer returns a DialOption that specifies a function to use for dialing network addresses. -func WithDialer(f func(string, time.Duration) (net.Conn, error)) DialOption { - return func(o *dialOptions) { - o.copts.Dialer = func(ctx context.Context, addr string) (net.Conn, error) { - if deadline, ok := ctx.Deadline(); ok { - return f(addr, deadline.Sub(time.Now())) - } - return f(addr, 0) - } - } -} - -// WithUserAgent returns a DialOption that specifies a user agent string for all the RPCs. -func WithUserAgent(s string) DialOption { - return func(o *dialOptions) { - o.copts.UserAgent = s - } -} - -// Dial creates a client connection to the given target. -func Dial(target string, opts ...DialOption) (*ClientConn, error) { - return DialContext(context.Background(), target, opts...) -} - -// DialContext creates a client connection to the given target. ctx can be used to -// cancel or expire the pending connecting. Once this function returns, the -// cancellation and expiration of ctx will be noop. 
Users should call ClientConn.Close -// to terminate all the pending operations after this function returns. -// This is the EXPERIMENTAL API. -func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) { - cc := &ClientConn{ - target: target, - conns: make(map[Address]*addrConn), - } - cc.ctx, cc.cancel = context.WithCancel(context.Background()) - defer func() { - select { - case <-ctx.Done(): - conn, err = nil, ctx.Err() - default: - } - - if err != nil { - cc.Close() - } - }() - - for _, opt := range opts { - opt(&cc.dopts) - } - - // Set defaults. - if cc.dopts.codec == nil { - cc.dopts.codec = protoCodec{} - } - if cc.dopts.bs == nil { - cc.dopts.bs = DefaultBackoffConfig - } - - var ( - ok bool - addrs []Address - ) - if cc.dopts.balancer == nil { - // Connect to target directly if balancer is nil. - addrs = append(addrs, Address{Addr: target}) - } else { - if err := cc.dopts.balancer.Start(target); err != nil { - return nil, err - } - ch := cc.dopts.balancer.Notify() - if ch == nil { - // There is no name resolver installed. - addrs = append(addrs, Address{Addr: target}) - } else { - addrs, ok = <-ch - if !ok || len(addrs) == 0 { - return nil, errNoAddr - } - } - } - waitC := make(chan error, 1) - go func() { - for _, a := range addrs { - if err := cc.resetAddrConn(a, false, nil); err != nil { - waitC <- err - return - } - } - close(waitC) - }() - var timeoutCh <-chan time.Time - if cc.dopts.timeout > 0 { - timeoutCh = time.After(cc.dopts.timeout) - } - select { - case <-ctx.Done(): - return nil, ctx.Err() - case err := <-waitC: - if err != nil { - return nil, err - } - case <-timeoutCh: - return nil, ErrClientConnTimeout - } - // If balancer is nil or balancer.Notify() is nil, ok will be false here. - // The lbWatcher goroutine will not be created. - if ok { - go cc.lbWatcher() - } - colonPos := strings.LastIndex(target, ":") - if colonPos == -1 { - colonPos = len(target) - } - cc.authority = target[:colonPos] - return cc, nil -} - -// ConnectivityState indicates the state of a client connection. -type ConnectivityState int - -const ( - // Idle indicates the ClientConn is idle. - Idle ConnectivityState = iota - // Connecting indicates the ClienConn is connecting. - Connecting - // Ready indicates the ClientConn is ready for work. - Ready - // TransientFailure indicates the ClientConn has seen a failure but expects to recover. - TransientFailure - // Shutdown indicates the ClientConn has started shutting down. - Shutdown -) - -func (s ConnectivityState) String() string { - switch s { - case Idle: - return "IDLE" - case Connecting: - return "CONNECTING" - case Ready: - return "READY" - case TransientFailure: - return "TRANSIENT_FAILURE" - case Shutdown: - return "SHUTDOWN" - default: - panic(fmt.Sprintf("unknown connectivity state: %d", s)) - } -} - -// ClientConn represents a client connection to an RPC server. -type ClientConn struct { - ctx context.Context - cancel context.CancelFunc - - target string - authority string - dopts dialOptions - - mu sync.RWMutex - conns map[Address]*addrConn -} - -func (cc *ClientConn) lbWatcher() { - for addrs := range cc.dopts.balancer.Notify() { - var ( - add []Address // Addresses need to setup connections. - del []*addrConn // Connections need to tear down. 
- ) - cc.mu.Lock() - for _, a := range addrs { - if _, ok := cc.conns[a]; !ok { - add = append(add, a) - } - } - for k, c := range cc.conns { - var keep bool - for _, a := range addrs { - if k == a { - keep = true - break - } - } - if !keep { - del = append(del, c) - delete(cc.conns, c.addr) - } - } - cc.mu.Unlock() - for _, a := range add { - cc.resetAddrConn(a, true, nil) - } - for _, c := range del { - c.tearDown(errConnDrain) - } - } -} - -// resetAddrConn creates an addrConn for addr and adds it to cc.conns. -// If there is an old addrConn for addr, it will be torn down, using tearDownErr as the reason. -// If tearDownErr is nil, errConnDrain will be used instead. -func (cc *ClientConn) resetAddrConn(addr Address, skipWait bool, tearDownErr error) error { - ac := &addrConn{ - cc: cc, - addr: addr, - dopts: cc.dopts, - } - ac.ctx, ac.cancel = context.WithCancel(cc.ctx) - ac.stateCV = sync.NewCond(&ac.mu) - if EnableTracing { - ac.events = trace.NewEventLog("grpc.ClientConn", ac.addr.Addr) - } - if !ac.dopts.insecure { - if ac.dopts.copts.TransportCredentials == nil { - return errNoTransportSecurity - } - } else { - if ac.dopts.copts.TransportCredentials != nil { - return errCredentialsConflict - } - for _, cd := range ac.dopts.copts.PerRPCCredentials { - if cd.RequireTransportSecurity() { - return errTransportCredentialsMissing - } - } - } - // Track ac in cc. This needs to be done before any getTransport(...) is called. - cc.mu.Lock() - if cc.conns == nil { - cc.mu.Unlock() - return ErrClientConnClosing - } - stale := cc.conns[ac.addr] - cc.conns[ac.addr] = ac - cc.mu.Unlock() - if stale != nil { - // There is an addrConn alive on ac.addr already. This could be due to - // 1) a buggy Balancer notifies duplicated Addresses; - // 2) goaway was received, a new ac will replace the old ac. - // The old ac should be deleted from cc.conns, but the - // underlying transport should drain rather than close. - if tearDownErr == nil { - // tearDownErr is nil if resetAddrConn is called by - // 1) Dial - // 2) lbWatcher - // In both cases, the stale ac should drain, not close. - stale.tearDown(errConnDrain) - } else { - stale.tearDown(tearDownErr) - } - } - // skipWait may overwrite the decision in ac.dopts.block. - if ac.dopts.block && !skipWait { - if err := ac.resetTransport(false); err != nil { - if err != errConnClosing { - // Tear down ac and delete it from cc.conns. - cc.mu.Lock() - delete(cc.conns, ac.addr) - cc.mu.Unlock() - ac.tearDown(err) - } - if e, ok := err.(transport.ConnectionError); ok && !e.Temporary() { - return e.Origin() - } - return err - } - // Start to monitor the error status of transport. - go ac.transportMonitor() - } else { - // Start a goroutine connecting to the server asynchronously. - go func() { - if err := ac.resetTransport(false); err != nil { - grpclog.Printf("Failed to dial %s: %v; please retry.", ac.addr.Addr, err) - if err != errConnClosing { - // Keep this ac in cc.conns, to get the reason it's torn down. - ac.tearDown(err) - } - return - } - ac.transportMonitor() - }() - } - return nil -} - -func (cc *ClientConn) getTransport(ctx context.Context, opts BalancerGetOptions) (transport.ClientTransport, func(), error) { - var ( - ac *addrConn - ok bool - put func() - ) - if cc.dopts.balancer == nil { - // If balancer is nil, there should be only one addrConn available. 
- cc.mu.RLock() - if cc.conns == nil { - cc.mu.RUnlock() - return nil, nil, toRPCErr(ErrClientConnClosing) - } - for _, ac = range cc.conns { - // Break after the first iteration to get the first addrConn. - ok = true - break - } - cc.mu.RUnlock() - } else { - var ( - addr Address - err error - ) - addr, put, err = cc.dopts.balancer.Get(ctx, opts) - if err != nil { - return nil, nil, toRPCErr(err) - } - cc.mu.RLock() - if cc.conns == nil { - cc.mu.RUnlock() - return nil, nil, toRPCErr(ErrClientConnClosing) - } - ac, ok = cc.conns[addr] - cc.mu.RUnlock() - } - if !ok { - if put != nil { - put() - } - return nil, nil, errConnClosing - } - t, err := ac.wait(ctx, cc.dopts.balancer != nil, !opts.BlockingWait) - if err != nil { - if put != nil { - put() - } - return nil, nil, err - } - return t, put, nil -} - -// Close tears down the ClientConn and all underlying connections. -func (cc *ClientConn) Close() error { - cc.cancel() - - cc.mu.Lock() - if cc.conns == nil { - cc.mu.Unlock() - return ErrClientConnClosing - } - conns := cc.conns - cc.conns = nil - cc.mu.Unlock() - if cc.dopts.balancer != nil { - cc.dopts.balancer.Close() - } - for _, ac := range conns { - ac.tearDown(ErrClientConnClosing) - } - return nil -} - -// addrConn is a network connection to a given address. -type addrConn struct { - ctx context.Context - cancel context.CancelFunc - - cc *ClientConn - addr Address - dopts dialOptions - events trace.EventLog - - mu sync.Mutex - state ConnectivityState - stateCV *sync.Cond - down func(error) // the handler called when a connection is down. - // ready is closed and becomes nil when a new transport is up or failed - // due to timeout. - ready chan struct{} - transport transport.ClientTransport - - // The reason this addrConn is torn down. - tearDownErr error -} - -// printf records an event in ac's event log, unless ac has been closed. -// REQUIRES ac.mu is held. -func (ac *addrConn) printf(format string, a ...interface{}) { - if ac.events != nil { - ac.events.Printf(format, a...) - } -} - -// errorf records an error in ac's event log, unless ac has been closed. -// REQUIRES ac.mu is held. -func (ac *addrConn) errorf(format string, a ...interface{}) { - if ac.events != nil { - ac.events.Errorf(format, a...) - } -} - -// getState returns the connectivity state of the Conn -func (ac *addrConn) getState() ConnectivityState { - ac.mu.Lock() - defer ac.mu.Unlock() - return ac.state -} - -// waitForStateChange blocks until the state changes to something other than the sourceState. -func (ac *addrConn) waitForStateChange(ctx context.Context, sourceState ConnectivityState) (ConnectivityState, error) { - ac.mu.Lock() - defer ac.mu.Unlock() - if sourceState != ac.state { - return ac.state, nil - } - done := make(chan struct{}) - var err error - go func() { - select { - case <-ctx.Done(): - ac.mu.Lock() - err = ctx.Err() - ac.stateCV.Broadcast() - ac.mu.Unlock() - case <-done: - } - }() - defer close(done) - for sourceState == ac.state { - ac.stateCV.Wait() - if err != nil { - return ac.state, err - } - } - return ac.state, nil -} - -func (ac *addrConn) resetTransport(closeTransport bool) error { - for retries := 0; ; retries++ { - ac.mu.Lock() - ac.printf("connecting") - if ac.state == Shutdown { - // ac.tearDown(...) has been invoked. 
- ac.mu.Unlock() - return errConnClosing - } - if ac.down != nil { - ac.down(downErrorf(false, true, "%v", errNetworkIO)) - ac.down = nil - } - ac.state = Connecting - ac.stateCV.Broadcast() - t := ac.transport - ac.mu.Unlock() - if closeTransport && t != nil { - t.Close() - } - sleepTime := ac.dopts.bs.backoff(retries) - timeout := minConnectTimeout - if timeout < sleepTime { - timeout = sleepTime - } - ctx, cancel := context.WithTimeout(ac.ctx, timeout) - connectTime := time.Now() - newTransport, err := transport.NewClientTransport(ctx, ac.addr.Addr, ac.dopts.copts) - if err != nil { - cancel() - - if e, ok := err.(transport.ConnectionError); ok && !e.Temporary() { - return err - } - grpclog.Printf("grpc: addrConn.resetTransport failed to create client transport: %v; Reconnecting to %q", err, ac.addr) - ac.mu.Lock() - if ac.state == Shutdown { - // ac.tearDown(...) has been invoked. - ac.mu.Unlock() - return errConnClosing - } - ac.errorf("transient failure: %v", err) - ac.state = TransientFailure - ac.stateCV.Broadcast() - if ac.ready != nil { - close(ac.ready) - ac.ready = nil - } - ac.mu.Unlock() - closeTransport = false - select { - case <-time.After(sleepTime - time.Since(connectTime)): - case <-ac.ctx.Done(): - return ac.ctx.Err() - } - continue - } - ac.mu.Lock() - ac.printf("ready") - if ac.state == Shutdown { - // ac.tearDown(...) has been invoked. - ac.mu.Unlock() - newTransport.Close() - return errConnClosing - } - ac.state = Ready - ac.stateCV.Broadcast() - ac.transport = newTransport - if ac.ready != nil { - close(ac.ready) - ac.ready = nil - } - if ac.cc.dopts.balancer != nil { - ac.down = ac.cc.dopts.balancer.Up(ac.addr) - } - ac.mu.Unlock() - return nil - } -} - -// Run in a goroutine to track the error in transport and create the -// new transport if an error happens. It returns when the channel is closing. -func (ac *addrConn) transportMonitor() { - for { - ac.mu.Lock() - t := ac.transport - ac.mu.Unlock() - select { - // This is needed to detect the teardown when - // the addrConn is idle (i.e., no RPC in flight). - case <-ac.ctx.Done(): - select { - case <-t.Error(): - t.Close() - default: - } - return - case <-t.GoAway(): - // If GoAway happens without any network I/O error, ac is closed without shutting down the - // underlying transport (the transport will be closed when all the pending RPCs finished or - // failed.). - // If GoAway and some network I/O error happen concurrently, ac and its underlying transport - // are closed. - // In both cases, a new ac is created. - select { - case <-t.Error(): - ac.cc.resetAddrConn(ac.addr, true, errNetworkIO) - default: - ac.cc.resetAddrConn(ac.addr, true, errConnDrain) - } - return - case <-t.Error(): - select { - case <-ac.ctx.Done(): - t.Close() - return - case <-t.GoAway(): - ac.cc.resetAddrConn(ac.addr, true, errNetworkIO) - return - default: - } - ac.mu.Lock() - if ac.state == Shutdown { - // ac has been shutdown. - ac.mu.Unlock() - return - } - ac.state = TransientFailure - ac.stateCV.Broadcast() - ac.mu.Unlock() - if err := ac.resetTransport(true); err != nil { - ac.mu.Lock() - ac.printf("transport exiting: %v", err) - ac.mu.Unlock() - grpclog.Printf("grpc: addrConn.transportMonitor exits due to: %v", err) - if err != errConnClosing { - // Keep this ac in cc.conns, to get the reason it's torn down. - ac.tearDown(err) - } - return - } - } - } -} - -// wait blocks until i) the new transport is up or ii) ctx is done or iii) ac is closed or -// iv) transport is in TransientFailure and there's no balancer/failfast is true. 
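For illustration of the reconnect loop above: each attempt dials with a deadline that is the larger of minConnectTimeout and the backoff delay, and the retry sleep is the backoff delay minus the time already spent connecting. A small sketch of just that arithmetic follows; the capped-exponential backoff function is a hypothetical stand-in for the removed BackoffConfig strategy.

// Sketch of the connect-timeout arithmetic in the deleted resetTransport
// loop. The backoff function below is a hypothetical stand-in.
package main

import (
	"fmt"
	"time"
)

const minConnectTimeout = 20 * time.Second // value from the deleted clientconn.go

func backoff(retries int) time.Duration { // hypothetical capped exponential delay
	d := time.Second << uint(retries)
	if d > 2*time.Minute {
		d = 2 * time.Minute
	}
	return d
}

func main() {
	for retries := 0; retries < 4; retries++ {
		sleepTime := backoff(retries)
		timeout := minConnectTimeout
		if timeout < sleepTime {
			timeout = sleepTime // an attempt never gets less time than the backoff delay
		}
		connectTime := time.Now()
		// ... dial with a context deadline of `timeout` here ...
		remaining := sleepTime - time.Since(connectTime) // on failure, sleep only what is left
		fmt.Println(retries, sleepTime, timeout, remaining > 0)
	}
}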
-func (ac *addrConn) wait(ctx context.Context, hasBalancer, failfast bool) (transport.ClientTransport, error) { - for { - ac.mu.Lock() - switch { - case ac.state == Shutdown: - if failfast || !hasBalancer { - // RPC is failfast or balancer is nil. This RPC should fail with ac.tearDownErr. - err := ac.tearDownErr - ac.mu.Unlock() - return nil, err - } - ac.mu.Unlock() - return nil, errConnClosing - case ac.state == Ready: - ct := ac.transport - ac.mu.Unlock() - return ct, nil - case ac.state == TransientFailure: - if failfast || hasBalancer { - ac.mu.Unlock() - return nil, errConnUnavailable - } - } - ready := ac.ready - if ready == nil { - ready = make(chan struct{}) - ac.ready = ready - } - ac.mu.Unlock() - select { - case <-ctx.Done(): - return nil, toRPCErr(ctx.Err()) - // Wait until the new transport is ready or failed. - case <-ready: - } - } -} - -// tearDown starts to tear down the addrConn. -// TODO(zhaoq): Make this synchronous to avoid unbounded memory consumption in -// some edge cases (e.g., the caller opens and closes many addrConn's in a -// tight loop. -// tearDown doesn't remove ac from ac.cc.conns. -func (ac *addrConn) tearDown(err error) { - ac.cancel() - - ac.mu.Lock() - defer ac.mu.Unlock() - if ac.down != nil { - ac.down(downErrorf(false, false, "%v", err)) - ac.down = nil - } - if err == errConnDrain && ac.transport != nil { - // GracefulClose(...) may be executed multiple times when - // i) receiving multiple GoAway frames from the server; or - // ii) there are concurrent name resolver/Balancer triggered - // address removal and GoAway. - ac.transport.GracefulClose() - } - if ac.state == Shutdown { - return - } - ac.state = Shutdown - ac.tearDownErr = err - ac.stateCV.Broadcast() - if ac.events != nil { - ac.events.Finish() - ac.events = nil - } - if ac.ready != nil { - close(ac.ready) - ac.ready = nil - } - if ac.transport != nil && err != errConnDrain { - ac.transport.Close() - } - return -} diff --git a/vendor/google.golang.org/grpc/codegen.sh b/vendor/google.golang.org/grpc/codegen.sh deleted file mode 100755 index b009488842..0000000000 --- a/vendor/google.golang.org/grpc/codegen.sh +++ /dev/null @@ -1,17 +0,0 @@ -#!/bin/bash - -# This script serves as an example to demonstrate how to generate the gRPC-Go -# interface and the related messages from .proto file. -# -# It assumes the installation of i) Google proto buffer compiler at -# https://github.com/google/protobuf (after v2.6.1) and ii) the Go codegen -# plugin at https://github.com/golang/protobuf (after 2015-02-20). If you have -# not, please install them first. -# -# We recommend running this script at $GOPATH/src. -# -# If this is not what you need, feel free to make your own scripts. Again, this -# script is for demonstration purpose. -# -proto=$1 -protoc --go_out=plugins=grpc:. 
$proto diff --git a/vendor/google.golang.org/grpc/codes/code_string.go b/vendor/google.golang.org/grpc/codes/code_string.go deleted file mode 100644 index e6762d0845..0000000000 --- a/vendor/google.golang.org/grpc/codes/code_string.go +++ /dev/null @@ -1,16 +0,0 @@ -// generated by stringer -type=Code; DO NOT EDIT - -package codes - -import "fmt" - -const _Code_name = "OKCanceledUnknownInvalidArgumentDeadlineExceededNotFoundAlreadyExistsPermissionDeniedResourceExhaustedFailedPreconditionAbortedOutOfRangeUnimplementedInternalUnavailableDataLossUnauthenticated" - -var _Code_index = [...]uint8{0, 2, 10, 17, 32, 48, 56, 69, 85, 102, 120, 127, 137, 150, 158, 169, 177, 192} - -func (i Code) String() string { - if i+1 >= Code(len(_Code_index)) { - return fmt.Sprintf("Code(%d)", i) - } - return _Code_name[_Code_index[i]:_Code_index[i+1]] -} diff --git a/vendor/google.golang.org/grpc/codes/codes.go b/vendor/google.golang.org/grpc/codes/codes.go deleted file mode 100644 index e14b464acf..0000000000 --- a/vendor/google.golang.org/grpc/codes/codes.go +++ /dev/null @@ -1,159 +0,0 @@ -/* - * - * Copyright 2014, Google Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -// Package codes defines the canonical error codes used by gRPC. It is -// consistent across various languages. -package codes // import "google.golang.org/grpc/codes" - -// A Code is an unsigned 32-bit error code as defined in the gRPC spec. -type Code uint32 - -//go:generate stringer -type=Code - -const ( - // OK is returned on success. - OK Code = 0 - - // Canceled indicates the operation was cancelled (typically by the caller). - Canceled Code = 1 - - // Unknown error. An example of where this error may be returned is - // if a Status value received from another address space belongs to - // an error-space that is not known in this address space. Also - // errors raised by APIs that do not return enough error information - // may be converted to this error. - Unknown Code = 2 - - // InvalidArgument indicates client specified an invalid argument. 
- // Note that this differs from FailedPrecondition. It indicates arguments - // that are problematic regardless of the state of the system - // (e.g., a malformed file name). - InvalidArgument Code = 3 - - // DeadlineExceeded means operation expired before completion. - // For operations that change the state of the system, this error may be - // returned even if the operation has completed successfully. For - // example, a successful response from a server could have been delayed - // long enough for the deadline to expire. - DeadlineExceeded Code = 4 - - // NotFound means some requested entity (e.g., file or directory) was - // not found. - NotFound Code = 5 - - // AlreadyExists means an attempt to create an entity failed because one - // already exists. - AlreadyExists Code = 6 - - // PermissionDenied indicates the caller does not have permission to - // execute the specified operation. It must not be used for rejections - // caused by exhausting some resource (use ResourceExhausted - // instead for those errors). It must not be - // used if the caller cannot be identified (use Unauthenticated - // instead for those errors). - PermissionDenied Code = 7 - - // Unauthenticated indicates the request does not have valid - // authentication credentials for the operation. - Unauthenticated Code = 16 - - // ResourceExhausted indicates some resource has been exhausted, perhaps - // a per-user quota, or perhaps the entire file system is out of space. - ResourceExhausted Code = 8 - - // FailedPrecondition indicates operation was rejected because the - // system is not in a state required for the operation's execution. - // For example, directory to be deleted may be non-empty, an rmdir - // operation is applied to a non-directory, etc. - // - // A litmus test that may help a service implementor in deciding - // between FailedPrecondition, Aborted, and Unavailable: - // (a) Use Unavailable if the client can retry just the failing call. - // (b) Use Aborted if the client should retry at a higher-level - // (e.g., restarting a read-modify-write sequence). - // (c) Use FailedPrecondition if the client should not retry until - // the system state has been explicitly fixed. E.g., if an "rmdir" - // fails because the directory is non-empty, FailedPrecondition - // should be returned since the client should not retry unless - // they have first fixed up the directory by deleting files from it. - // (d) Use FailedPrecondition if the client performs conditional - // REST Get/Update/Delete on a resource and the resource on the - // server does not match the condition. E.g., conflicting - // read-modify-write on the same resource. - FailedPrecondition Code = 9 - - // Aborted indicates the operation was aborted, typically due to a - // concurrency issue like sequencer check failures, transaction aborts, - // etc. - // - // See litmus test above for deciding between FailedPrecondition, - // Aborted, and Unavailable. - Aborted Code = 10 - - // OutOfRange means operation was attempted past the valid range. - // E.g., seeking or reading past end of file. - // - // Unlike InvalidArgument, this error indicates a problem that may - // be fixed if the system state changes. For example, a 32-bit file - // system will generate InvalidArgument if asked to read at an - // offset that is not in the range [0,2^32-1], but it will generate - // OutOfRange if asked to read from an offset past the current - // file size. - // - // There is a fair bit of overlap between FailedPrecondition and - // OutOfRange. 
We recommend using OutOfRange (the more specific - // error) when it applies so that callers who are iterating through - // a space can easily look for an OutOfRange error to detect when - // they are done. - OutOfRange Code = 11 - - // Unimplemented indicates operation is not implemented or not - // supported/enabled in this service. - Unimplemented Code = 12 - - // Internal errors. Means some invariants expected by underlying - // system has been broken. If you see one of these errors, - // something is very broken. - Internal Code = 13 - - // Unavailable indicates the service is currently unavailable. - // This is a most likely a transient condition and may be corrected - // by retrying with a backoff. - // - // See litmus test above for deciding between FailedPrecondition, - // Aborted, and Unavailable. - Unavailable Code = 14 - - // DataLoss indicates unrecoverable data loss or corruption. - DataLoss Code = 15 -) diff --git a/vendor/google.golang.org/grpc/coverage.sh b/vendor/google.golang.org/grpc/coverage.sh deleted file mode 100755 index 120235374a..0000000000 --- a/vendor/google.golang.org/grpc/coverage.sh +++ /dev/null @@ -1,47 +0,0 @@ -#!/bin/bash - -set -e - -workdir=.cover -profile="$workdir/cover.out" -mode=set -end2endtest="google.golang.org/grpc/test" - -generate_cover_data() { - rm -rf "$workdir" - mkdir "$workdir" - - for pkg in "$@"; do - if [ $pkg == "google.golang.org/grpc" -o $pkg == "google.golang.org/grpc/transport" -o $pkg == "google.golang.org/grpc/metadata" -o $pkg == "google.golang.org/grpc/credentials" ] - then - f="$workdir/$(echo $pkg | tr / -)" - go test -covermode="$mode" -coverprofile="$f.cover" "$pkg" - go test -covermode="$mode" -coverpkg "$pkg" -coverprofile="$f.e2e.cover" "$end2endtest" - fi - done - - echo "mode: $mode" >"$profile" - grep -h -v "^mode:" "$workdir"/*.cover >>"$profile" -} - -show_cover_report() { - go tool cover -${1}="$profile" -} - -push_to_coveralls() { - goveralls -coverprofile="$profile" -} - -generate_cover_data $(go list ./...) -show_cover_report func -case "$1" in -"") - ;; ---html) - show_cover_report html ;; ---coveralls) - push_to_coveralls ;; -*) - echo >&2 "error: invalid option: $1" ;; -esac -rm -rf "$workdir" diff --git a/vendor/google.golang.org/grpc/credentials/credentials.go b/vendor/google.golang.org/grpc/credentials/credentials.go deleted file mode 100644 index 13be45742d..0000000000 --- a/vendor/google.golang.org/grpc/credentials/credentials.go +++ /dev/null @@ -1,220 +0,0 @@ -/* - * - * Copyright 2014, Google Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -// Package credentials implements various credentials supported by gRPC library, -// which encapsulate all the state needed by a client to authenticate with a -// server and make various assertions, e.g., about the client's identity, role, -// or whether it is authorized to make a particular call. -package credentials // import "google.golang.org/grpc/credentials" - -import ( - "crypto/tls" - "crypto/x509" - "errors" - "fmt" - "io/ioutil" - "net" - "strings" - - "golang.org/x/net/context" -) - -var ( - // alpnProtoStr are the specified application level protocols for gRPC. - alpnProtoStr = []string{"h2"} -) - -// PerRPCCredentials defines the common interface for the credentials which need to -// attach security information to every RPC (e.g., oauth2). -type PerRPCCredentials interface { - // GetRequestMetadata gets the current request metadata, refreshing - // tokens if required. This should be called by the transport layer on - // each request, and the data should be populated in headers or other - // context. uri is the URI of the entry point for the request. When - // supported by the underlying implementation, ctx can be used for - // timeout and cancellation. - // TODO(zhaoq): Define the set of the qualified keys instead of leaving - // it as an arbitrary string. - GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) - // RequireTransportSecurity indicates whether the credentials requires - // transport security. - RequireTransportSecurity() bool -} - -// ProtocolInfo provides information regarding the gRPC wire protocol version, -// security protocol, security protocol version in use, etc. -type ProtocolInfo struct { - // ProtocolVersion is the gRPC wire protocol version. - ProtocolVersion string - // SecurityProtocol is the security protocol in use. - SecurityProtocol string - // SecurityVersion is the security protocol version. - SecurityVersion string -} - -// AuthInfo defines the common interface for the auth information the users are interested in. -type AuthInfo interface { - AuthType() string -} - -var ( - // ErrConnDispatched indicates that rawConn has been dispatched out of gRPC - // and the caller should not close rawConn. - ErrConnDispatched = errors.New("credentials: rawConn is dispatched out of gRPC") -) - -// TransportCredentials defines the common interface for all the live gRPC wire -// protocols and supported transport security protocols (e.g., TLS, SSL). -type TransportCredentials interface { - // ClientHandshake does the authentication handshake specified by the corresponding - // authentication protocol on rawConn for clients. It returns the authenticated - // connection and the corresponding auth information about the connection. - // Implementations must use the provided context to implement timely cancellation. - ClientHandshake(context.Context, string, net.Conn) (net.Conn, AuthInfo, error) - // ServerHandshake does the authentication handshake for servers. 
It returns - // the authenticated connection and the corresponding auth information about - // the connection. - ServerHandshake(net.Conn) (net.Conn, AuthInfo, error) - // Info provides the ProtocolInfo of this TransportCredentials. - Info() ProtocolInfo -} - -// TLSInfo contains the auth information for a TLS authenticated connection. -// It implements the AuthInfo interface. -type TLSInfo struct { - State tls.ConnectionState -} - -// AuthType returns the type of TLSInfo as a string. -func (t TLSInfo) AuthType() string { - return "tls" -} - -// tlsCreds is the credentials required for authenticating a connection using TLS. -type tlsCreds struct { - // TLS configuration - config *tls.Config -} - -func (c tlsCreds) Info() ProtocolInfo { - return ProtocolInfo{ - SecurityProtocol: "tls", - SecurityVersion: "1.2", - } -} - -// GetRequestMetadata returns nil, nil since TLS credentials does not have -// metadata. -func (c *tlsCreds) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) { - return nil, nil -} - -func (c *tlsCreds) RequireTransportSecurity() bool { - return true -} - -func (c *tlsCreds) ClientHandshake(ctx context.Context, addr string, rawConn net.Conn) (_ net.Conn, _ AuthInfo, err error) { - // use local cfg to avoid clobbering ServerName if using multiple endpoints - cfg := cloneTLSConfig(c.config) - if cfg.ServerName == "" { - colonPos := strings.LastIndex(addr, ":") - if colonPos == -1 { - colonPos = len(addr) - } - cfg.ServerName = addr[:colonPos] - } - conn := tls.Client(rawConn, cfg) - errChannel := make(chan error, 1) - go func() { - errChannel <- conn.Handshake() - }() - select { - case err := <-errChannel: - if err != nil { - return nil, nil, err - } - case <-ctx.Done(): - return nil, nil, ctx.Err() - } - // TODO(zhaoq): Omit the auth info for client now. It is more for - // information than anything else. - return conn, nil, nil -} - -func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) { - conn := tls.Server(rawConn, c.config) - if err := conn.Handshake(); err != nil { - return nil, nil, err - } - return conn, TLSInfo{conn.ConnectionState()}, nil -} - -// NewTLS uses c to construct a TransportCredentials based on TLS. -func NewTLS(c *tls.Config) TransportCredentials { - tc := &tlsCreds{cloneTLSConfig(c)} - tc.config.NextProtos = alpnProtoStr - return tc -} - -// NewClientTLSFromCert constructs a TLS from the input certificate for client. -func NewClientTLSFromCert(cp *x509.CertPool, serverName string) TransportCredentials { - return NewTLS(&tls.Config{ServerName: serverName, RootCAs: cp}) -} - -// NewClientTLSFromFile constructs a TLS from the input certificate file for client. -func NewClientTLSFromFile(certFile, serverName string) (TransportCredentials, error) { - b, err := ioutil.ReadFile(certFile) - if err != nil { - return nil, err - } - cp := x509.NewCertPool() - if !cp.AppendCertsFromPEM(b) { - return nil, fmt.Errorf("credentials: failed to append certificates") - } - return NewTLS(&tls.Config{ServerName: serverName, RootCAs: cp}), nil -} - -// NewServerTLSFromCert constructs a TLS from the input certificate for server. -func NewServerTLSFromCert(cert *tls.Certificate) TransportCredentials { - return NewTLS(&tls.Config{Certificates: []tls.Certificate{*cert}}) -} - -// NewServerTLSFromFile constructs a TLS from the input certificate file and key -// file for server. 
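For illustration of how the credentials constructors above plug into the dial options removed earlier in this change, a short usage sketch against the vendored (pre-1.0) API; the CA file path and the target address are placeholders.

// Sketch: wiring NewClientTLSFromFile into grpc.Dial via
// WithTransportCredentials. Paths and addresses are placeholders.
package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
)

func main() {
	creds, err := credentials.NewClientTLSFromFile("ca.pem", "example.com")
	if err != nil {
		log.Fatalf("load TLS credentials: %v", err)
	}
	// WithTransportCredentials and WithInsecure are mutually exclusive;
	// the deleted clientconn.go returns errCredentialsConflict if both are set.
	conn, err := grpc.Dial("example.com:443", grpc.WithTransportCredentials(creds))
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}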
-func NewServerTLSFromFile(certFile, keyFile string) (TransportCredentials, error) { - cert, err := tls.LoadX509KeyPair(certFile, keyFile) - if err != nil { - return nil, err - } - return NewTLS(&tls.Config{Certificates: []tls.Certificate{cert}}), nil -} diff --git a/vendor/google.golang.org/grpc/credentials/credentials_util_go17.go b/vendor/google.golang.org/grpc/credentials/credentials_util_go17.go deleted file mode 100644 index 9647b9ec83..0000000000 --- a/vendor/google.golang.org/grpc/credentials/credentials_util_go17.go +++ /dev/null @@ -1,76 +0,0 @@ -// +build go1.7 - -/* - * - * Copyright 2016, Google Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -package credentials - -import ( - "crypto/tls" -) - -// cloneTLSConfig returns a shallow clone of the exported -// fields of cfg, ignoring the unexported sync.Once, which -// contains a mutex and must not be copied. -// -// If cfg is nil, a new zero tls.Config is returned. -// -// TODO replace this function with official clone function. 
-func cloneTLSConfig(cfg *tls.Config) *tls.Config { - if cfg == nil { - return &tls.Config{} - } - return &tls.Config{ - Rand: cfg.Rand, - Time: cfg.Time, - Certificates: cfg.Certificates, - NameToCertificate: cfg.NameToCertificate, - GetCertificate: cfg.GetCertificate, - RootCAs: cfg.RootCAs, - NextProtos: cfg.NextProtos, - ServerName: cfg.ServerName, - ClientAuth: cfg.ClientAuth, - ClientCAs: cfg.ClientCAs, - InsecureSkipVerify: cfg.InsecureSkipVerify, - CipherSuites: cfg.CipherSuites, - PreferServerCipherSuites: cfg.PreferServerCipherSuites, - SessionTicketsDisabled: cfg.SessionTicketsDisabled, - SessionTicketKey: cfg.SessionTicketKey, - ClientSessionCache: cfg.ClientSessionCache, - MinVersion: cfg.MinVersion, - MaxVersion: cfg.MaxVersion, - CurvePreferences: cfg.CurvePreferences, - DynamicRecordSizingDisabled: cfg.DynamicRecordSizingDisabled, - Renegotiation: cfg.Renegotiation, - } -} diff --git a/vendor/google.golang.org/grpc/credentials/credentials_util_pre_go17.go b/vendor/google.golang.org/grpc/credentials/credentials_util_pre_go17.go deleted file mode 100644 index 09b8d12c79..0000000000 --- a/vendor/google.golang.org/grpc/credentials/credentials_util_pre_go17.go +++ /dev/null @@ -1,74 +0,0 @@ -// +build !go1.7 - -/* - * - * Copyright 2016, Google Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -package credentials - -import ( - "crypto/tls" -) - -// cloneTLSConfig returns a shallow clone of the exported -// fields of cfg, ignoring the unexported sync.Once, which -// contains a mutex and must not be copied. -// -// If cfg is nil, a new zero tls.Config is returned. -// -// TODO replace this function with official clone function. 
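On the TODO above: from Go 1.8 onward the standard library exports (*tls.Config).Clone, which is the kind of official clone helper this comment anticipates, so on newer Go the deleted field-by-field copy reduces to roughly the following sketch (cloneTLSConfigModern is a hypothetical name, assuming Go >= 1.8).

// Sketch of a Clone-based replacement for the deleted helper.
package main

import "crypto/tls"

func cloneTLSConfigModern(cfg *tls.Config) *tls.Config {
	if cfg == nil {
		return &tls.Config{} // preserve the deleted helper's nil handling
	}
	return cfg.Clone() // standard-library copy that avoids copying internal state by hand
}

func main() {
	_ = cloneTLSConfigModern(nil)
}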
-func cloneTLSConfig(cfg *tls.Config) *tls.Config { - if cfg == nil { - return &tls.Config{} - } - return &tls.Config{ - Rand: cfg.Rand, - Time: cfg.Time, - Certificates: cfg.Certificates, - NameToCertificate: cfg.NameToCertificate, - GetCertificate: cfg.GetCertificate, - RootCAs: cfg.RootCAs, - NextProtos: cfg.NextProtos, - ServerName: cfg.ServerName, - ClientAuth: cfg.ClientAuth, - ClientCAs: cfg.ClientCAs, - InsecureSkipVerify: cfg.InsecureSkipVerify, - CipherSuites: cfg.CipherSuites, - PreferServerCipherSuites: cfg.PreferServerCipherSuites, - SessionTicketsDisabled: cfg.SessionTicketsDisabled, - SessionTicketKey: cfg.SessionTicketKey, - ClientSessionCache: cfg.ClientSessionCache, - MinVersion: cfg.MinVersion, - MaxVersion: cfg.MaxVersion, - CurvePreferences: cfg.CurvePreferences, - } -} diff --git a/vendor/google.golang.org/grpc/doc.go b/vendor/google.golang.org/grpc/doc.go deleted file mode 100644 index a35f218852..0000000000 --- a/vendor/google.golang.org/grpc/doc.go +++ /dev/null @@ -1,6 +0,0 @@ -/* -Package grpc implements an RPC system called gRPC. - -See www.grpc.io for more information about gRPC. -*/ -package grpc // import "google.golang.org/grpc" diff --git a/vendor/google.golang.org/grpc/grpclog/logger.go b/vendor/google.golang.org/grpc/grpclog/logger.go deleted file mode 100644 index 3b2933079e..0000000000 --- a/vendor/google.golang.org/grpc/grpclog/logger.go +++ /dev/null @@ -1,93 +0,0 @@ -/* - * - * Copyright 2015, Google Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -/* -Package grpclog defines logging for grpc. -*/ -package grpclog // import "google.golang.org/grpc/grpclog" - -import ( - "log" - "os" -) - -// Use golang's standard logger by default. -// Access is not mutex-protected: do not modify except in init() -// functions. -var logger Logger = log.New(os.Stderr, "", log.LstdFlags) - -// Logger mimics golang's standard Logger as an interface. 
-type Logger interface { - Fatal(args ...interface{}) - Fatalf(format string, args ...interface{}) - Fatalln(args ...interface{}) - Print(args ...interface{}) - Printf(format string, args ...interface{}) - Println(args ...interface{}) -} - -// SetLogger sets the logger that is used in grpc. Call only from -// init() functions. -func SetLogger(l Logger) { - logger = l -} - -// Fatal is equivalent to Print() followed by a call to os.Exit() with a non-zero exit code. -func Fatal(args ...interface{}) { - logger.Fatal(args...) -} - -// Fatalf is equivalent to Printf() followed by a call to os.Exit() with a non-zero exit code. -func Fatalf(format string, args ...interface{}) { - logger.Fatalf(format, args...) -} - -// Fatalln is equivalent to Println() followed by a call to os.Exit()) with a non-zero exit code. -func Fatalln(args ...interface{}) { - logger.Fatalln(args...) -} - -// Print prints to the logger. Arguments are handled in the manner of fmt.Print. -func Print(args ...interface{}) { - logger.Print(args...) -} - -// Printf prints to the logger. Arguments are handled in the manner of fmt.Printf. -func Printf(format string, args ...interface{}) { - logger.Printf(format, args...) -} - -// Println prints to the logger. Arguments are handled in the manner of fmt.Println. -func Println(args ...interface{}) { - logger.Println(args...) -} diff --git a/vendor/google.golang.org/grpc/interceptor.go b/vendor/google.golang.org/grpc/interceptor.go deleted file mode 100644 index 588f59e5ab..0000000000 --- a/vendor/google.golang.org/grpc/interceptor.go +++ /dev/null @@ -1,74 +0,0 @@ -/* - * - * Copyright 2016, Google Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -package grpc - -import ( - "golang.org/x/net/context" -) - -// UnaryServerInfo consists of various information about a unary RPC on -// server side. All per-rpc information may be mutated by the interceptor. -type UnaryServerInfo struct { - // Server is the service implementation the user provides. This is read-only. 
- Server interface{} - // FullMethod is the full RPC method string, i.e., /package.service/method. - FullMethod string -} - -// UnaryHandler defines the handler invoked by UnaryServerInterceptor to complete the normal -// execution of a unary RPC. -type UnaryHandler func(ctx context.Context, req interface{}) (interface{}, error) - -// UnaryServerInterceptor provides a hook to intercept the execution of a unary RPC on the server. info -// contains all the information of this RPC the interceptor can operate on. And handler is the wrapper -// of the service method implementation. It is the responsibility of the interceptor to invoke handler -// to complete the RPC. -type UnaryServerInterceptor func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (resp interface{}, err error) - -// StreamServerInfo consists of various information about a streaming RPC on -// server side. All per-rpc information may be mutated by the interceptor. -type StreamServerInfo struct { - // FullMethod is the full RPC method string, i.e., /package.service/method. - FullMethod string - // IsClientStream indicates whether the RPC is a client streaming RPC. - IsClientStream bool - // IsServerStream indicates whether the RPC is a server streaming RPC. - IsServerStream bool -} - -// StreamServerInterceptor provides a hook to intercept the execution of a streaming RPC on the server. -// info contains all the information of this RPC the interceptor can operate on. And handler is the -// service method implementation. It is the responsibility of the interceptor to invoke handler to -// complete the RPC. -type StreamServerInterceptor func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go deleted file mode 100644 index 5489143a85..0000000000 --- a/vendor/google.golang.org/grpc/internal/internal.go +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Copyright 2016, Google Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -// Package internal contains gRPC-internal code for testing, to avoid polluting -// the godoc of the top-level grpc package. -package internal - -// TestingCloseConns closes all existing transports but keeps -// grpcServer.lis accepting new connections. -// -// The provided grpcServer must be of type *grpc.Server. It is untyped -// for circular dependency reasons. -var TestingCloseConns func(grpcServer interface{}) - -// TestingUseHandlerImpl enables the http.Handler-based server implementation. -// It must be called before Serve and requires TLS credentials. -// -// The provided grpcServer must be of type *grpc.Server. It is untyped -// for circular dependency reasons. -var TestingUseHandlerImpl func(grpcServer interface{}) diff --git a/vendor/google.golang.org/grpc/metadata/metadata.go b/vendor/google.golang.org/grpc/metadata/metadata.go deleted file mode 100644 index 954c0f77c6..0000000000 --- a/vendor/google.golang.org/grpc/metadata/metadata.go +++ /dev/null @@ -1,140 +0,0 @@ -/* - * - * Copyright 2014, Google Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -// Package metadata define the structure of the metadata supported by gRPC library. -package metadata // import "google.golang.org/grpc/metadata" - -import ( - "encoding/base64" - "fmt" - "strings" - - "golang.org/x/net/context" -) - -const ( - binHdrSuffix = "-bin" -) - -// encodeKeyValue encodes key and value qualified for transmission via gRPC. -// Transmitting binary headers violates HTTP/2 spec. 
-// TODO(zhaoq): Maybe check if k is ASCII also. -func encodeKeyValue(k, v string) (string, string) { - k = strings.ToLower(k) - if strings.HasSuffix(k, binHdrSuffix) { - val := base64.StdEncoding.EncodeToString([]byte(v)) - v = string(val) - } - return k, v -} - -// DecodeKeyValue returns the original key and value corresponding to the -// encoded data in k, v. -// If k is a binary header and v contains comma, v is split on comma before decoded, -// and the decoded v will be joined with comma before returned. -func DecodeKeyValue(k, v string) (string, string, error) { - if !strings.HasSuffix(k, binHdrSuffix) { - return k, v, nil - } - vvs := strings.Split(v, ",") - for i, vv := range vvs { - val, err := base64.StdEncoding.DecodeString(vv) - if err != nil { - return "", "", err - } - vvs[i] = string(val) - } - return k, strings.Join(vvs, ","), nil -} - -// MD is a mapping from metadata keys to values. Users should use the following -// two convenience functions New and Pairs to generate MD. -type MD map[string][]string - -// New creates a MD from given key-value map. -func New(m map[string]string) MD { - md := MD{} - for k, v := range m { - key, val := encodeKeyValue(k, v) - md[key] = append(md[key], val) - } - return md -} - -// Pairs returns an MD formed by the mapping of key, value ... -// Pairs panics if len(kv) is odd. -func Pairs(kv ...string) MD { - if len(kv)%2 == 1 { - panic(fmt.Sprintf("metadata: Pairs got the odd number of input pairs for metadata: %d", len(kv))) - } - md := MD{} - var k string - for i, s := range kv { - if i%2 == 0 { - k = s - continue - } - key, val := encodeKeyValue(k, s) - md[key] = append(md[key], val) - } - return md -} - -// Len returns the number of items in md. -func (md MD) Len() int { - return len(md) -} - -// Copy returns a copy of md. -func (md MD) Copy() MD { - out := MD{} - for k, v := range md { - for _, i := range v { - out[k] = append(out[k], i) - } - } - return out -} - -type mdKey struct{} - -// NewContext creates a new context with md attached. -func NewContext(ctx context.Context, md MD) context.Context { - return context.WithValue(ctx, mdKey{}, md) -} - -// FromContext returns the MD in ctx if it exists. -func FromContext(ctx context.Context) (md MD, ok bool) { - md, ok = ctx.Value(mdKey{}).(MD) - return -} diff --git a/vendor/google.golang.org/grpc/naming/naming.go b/vendor/google.golang.org/grpc/naming/naming.go deleted file mode 100644 index c2e0871e6f..0000000000 --- a/vendor/google.golang.org/grpc/naming/naming.go +++ /dev/null @@ -1,74 +0,0 @@ -/* - * - * Copyright 2014, Google Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. 
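For illustration of the metadata helpers above (Pairs, NewContext, FromContext) as defined in this vendored copy, a short usage sketch; the header keys are placeholders, and a key with the "-bin" suffix has its value base64-encoded by encodeKeyValue so that binary data never appears raw in HTTP/2 headers.

// Sketch: build an MD, attach it to a context, and read it back,
// per the vendored metadata API being removed. Keys are placeholders.
package main

import (
	"fmt"

	"golang.org/x/net/context"
	"google.golang.org/grpc/metadata"
)

func main() {
	md := metadata.Pairs(
		"request-id", "42",
		"trace-bin", string([]byte{0x01, 0x02}), // "-bin" suffix => value is base64-encoded
	)
	ctx := metadata.NewContext(context.Background(), md)

	if got, ok := metadata.FromContext(ctx); ok {
		fmt.Println(got.Len(), got["request-id"], got["trace-bin"])
	}
}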
- * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -// Package naming defines the naming API and related data structures for gRPC. -// The interface is EXPERIMENTAL and may be suject to change. -package naming - -// Operation defines the corresponding operations for a name resolution change. -type Operation uint8 - -const ( - // Add indicates a new address is added. - Add Operation = iota - // Delete indicates an exisiting address is deleted. - Delete -) - -// Update defines a name resolution update. Notice that it is not valid having both -// empty string Addr and nil Metadata in an Update. -type Update struct { - // Op indicates the operation of the update. - Op Operation - // Addr is the updated address. It is empty string if there is no address update. - Addr string - // Metadata is the updated metadata. It is nil if there is no metadata update. - // Metadata is not required for a custom naming implementation. - Metadata interface{} -} - -// Resolver creates a Watcher for a target to track its resolution changes. -type Resolver interface { - // Resolve creates a Watcher for target. - Resolve(target string) (Watcher, error) -} - -// Watcher watches for the updates on the specified target. -type Watcher interface { - // Next blocks until an update or error happens. It may return one or more - // updates. The first call should get the full set of the results. It should - // return an error if and only if Watcher cannot recover. - Next() ([]*Update, error) - // Close closes the Watcher. - Close() -} diff --git a/vendor/google.golang.org/grpc/peer/peer.go b/vendor/google.golang.org/grpc/peer/peer.go deleted file mode 100644 index bfa6205ba9..0000000000 --- a/vendor/google.golang.org/grpc/peer/peer.go +++ /dev/null @@ -1,65 +0,0 @@ -/* - * - * Copyright 2014, Google Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. 
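Because the naming API above is only a pair of small interfaces, a custom resolver is short to write. The sketch below resolves every target to a fixed address list, returning the full set on the first Next call as the Watcher contract requires; the package and type names are invented for illustration and this is not the package's own implementation.

package staticnaming

import (
	"errors"

	"google.golang.org/grpc/naming"
)

// Resolver resolves every target to a fixed address list.
type Resolver struct{ addrs []string }

func New(addrs ...string) *Resolver { return &Resolver{addrs: addrs} }

func (r *Resolver) Resolve(target string) (naming.Watcher, error) {
	return &watcher{addrs: r.addrs, stop: make(chan struct{})}, nil
}

type watcher struct {
	addrs []string
	sent  bool
	stop  chan struct{}
}

// Next returns the full address set on the first call, then blocks until
// Close is called.
func (w *watcher) Next() ([]*naming.Update, error) {
	if !w.sent {
		w.sent = true
		updates := make([]*naming.Update, 0, len(w.addrs))
		for _, a := range w.addrs {
			updates = append(updates, &naming.Update{Op: naming.Add, Addr: a})
		}
		return updates, nil
	}
	<-w.stop
	return nil, errors.New("staticnaming: watcher closed")
}

func (w *watcher) Close() { close(w.stop) }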
- * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -// Package peer defines various peer information associated with RPCs and -// corresponding utils. -package peer - -import ( - "net" - - "golang.org/x/net/context" - "google.golang.org/grpc/credentials" -) - -// Peer contains the information of the peer for an RPC. -type Peer struct { - // Addr is the peer address. - Addr net.Addr - // AuthInfo is the authentication information of the transport. - // It is nil if there is no transport security being used. - AuthInfo credentials.AuthInfo -} - -type peerKey struct{} - -// NewContext creates a new context with peer information attached. -func NewContext(ctx context.Context, p *Peer) context.Context { - return context.WithValue(ctx, peerKey{}, p) -} - -// FromContext returns the peer information in ctx if it exists. -func FromContext(ctx context.Context) (p *Peer, ok bool) { - p, ok = ctx.Value(peerKey{}).(*Peer) - return -} diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go deleted file mode 100644 index 35ac9cc7b0..0000000000 --- a/vendor/google.golang.org/grpc/rpc_util.go +++ /dev/null @@ -1,457 +0,0 @@ -/* - * - * Copyright 2014, Google Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
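A server-side handler would typically consume the peer package above as follows. A minimal sketch; the log format is chosen arbitrarily.

package peerinfo

import (
	"log"

	"golang.org/x/net/context"
	"google.golang.org/grpc/peer"
)

// logPeer shows how a handler can inspect who is calling it: the transport
// attaches a *peer.Peer to the request context and FromContext retrieves it.
func logPeer(ctx context.Context) {
	if p, ok := peer.FromContext(ctx); ok {
		log.Printf("RPC from %v (auth info: %v)", p.Addr, p.AuthInfo)
	}
}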
- * - */ - -package grpc - -import ( - "bytes" - "compress/gzip" - "encoding/binary" - "fmt" - "io" - "io/ioutil" - "math" - "os" - - "github.com/golang/protobuf/proto" - "golang.org/x/net/context" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/transport" -) - -// Codec defines the interface gRPC uses to encode and decode messages. -type Codec interface { - // Marshal returns the wire format of v. - Marshal(v interface{}) ([]byte, error) - // Unmarshal parses the wire format into v. - Unmarshal(data []byte, v interface{}) error - // String returns the name of the Codec implementation. The returned - // string will be used as part of content type in transmission. - String() string -} - -// protoCodec is a Codec implementation with protobuf. It is the default codec for gRPC. -type protoCodec struct{} - -func (protoCodec) Marshal(v interface{}) ([]byte, error) { - return proto.Marshal(v.(proto.Message)) -} - -func (protoCodec) Unmarshal(data []byte, v interface{}) error { - return proto.Unmarshal(data, v.(proto.Message)) -} - -func (protoCodec) String() string { - return "proto" -} - -// Compressor defines the interface gRPC uses to compress a message. -type Compressor interface { - // Do compresses p into w. - Do(w io.Writer, p []byte) error - // Type returns the compression algorithm the Compressor uses. - Type() string -} - -// NewGZIPCompressor creates a Compressor based on GZIP. -func NewGZIPCompressor() Compressor { - return &gzipCompressor{} -} - -type gzipCompressor struct { -} - -func (c *gzipCompressor) Do(w io.Writer, p []byte) error { - z := gzip.NewWriter(w) - if _, err := z.Write(p); err != nil { - return err - } - return z.Close() -} - -func (c *gzipCompressor) Type() string { - return "gzip" -} - -// Decompressor defines the interface gRPC uses to decompress a message. -type Decompressor interface { - // Do reads the data from r and uncompress them. - Do(r io.Reader) ([]byte, error) - // Type returns the compression algorithm the Decompressor uses. - Type() string -} - -type gzipDecompressor struct { -} - -// NewGZIPDecompressor creates a Decompressor based on GZIP. -func NewGZIPDecompressor() Decompressor { - return &gzipDecompressor{} -} - -func (d *gzipDecompressor) Do(r io.Reader) ([]byte, error) { - z, err := gzip.NewReader(r) - if err != nil { - return nil, err - } - defer z.Close() - return ioutil.ReadAll(z) -} - -func (d *gzipDecompressor) Type() string { - return "gzip" -} - -// callInfo contains all related configuration and information about an RPC. -type callInfo struct { - failFast bool - headerMD metadata.MD - trailerMD metadata.MD - traceInfo traceInfo // in trace.go -} - -var defaultCallInfo = callInfo{failFast: true} - -// CallOption configures a Call before it starts or extracts information from -// a Call after it completes. -type CallOption interface { - // before is called before the call is sent to any server. If before - // returns a non-nil error, the RPC fails with that error. - before(*callInfo) error - - // after is called after the call has completed. after cannot return an - // error, so any failures should be reported via output parameters. 
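The Codec interface above is the extension point for alternative encodings. Below is a sketch of a JSON codec that satisfies it; it could be installed with the CustomCodec server option that appears later in this diff, but it is not part of the deleted code.

package jsoncodec

import "encoding/json"

// Codec is a minimal alternative to the default protoCodec.
type Codec struct{}

func (Codec) Marshal(v interface{}) ([]byte, error)      { return json.Marshal(v) }
func (Codec) Unmarshal(data []byte, v interface{}) error { return json.Unmarshal(data, v) }

// String is used as part of the content type in transmission.
func (Codec) String() string { return "json" }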
- after(*callInfo) -} - -type beforeCall func(c *callInfo) error - -func (o beforeCall) before(c *callInfo) error { return o(c) } -func (o beforeCall) after(c *callInfo) {} - -type afterCall func(c *callInfo) - -func (o afterCall) before(c *callInfo) error { return nil } -func (o afterCall) after(c *callInfo) { o(c) } - -// Header returns a CallOptions that retrieves the header metadata -// for a unary RPC. -func Header(md *metadata.MD) CallOption { - return afterCall(func(c *callInfo) { - *md = c.headerMD - }) -} - -// Trailer returns a CallOptions that retrieves the trailer metadata -// for a unary RPC. -func Trailer(md *metadata.MD) CallOption { - return afterCall(func(c *callInfo) { - *md = c.trailerMD - }) -} - -// FailFast configures the action to take when an RPC is attempted on broken -// connections or unreachable servers. If failfast is true, the RPC will fail -// immediately. Otherwise, the RPC client will block the call until a -// connection is available (or the call is canceled or times out) and will retry -// the call if it fails due to a transient error. Please refer to -// https://github.com/grpc/grpc/blob/master/doc/fail_fast.md -func FailFast(failFast bool) CallOption { - return beforeCall(func(c *callInfo) error { - c.failFast = failFast - return nil - }) -} - -// The format of the payload: compressed or not? -type payloadFormat uint8 - -const ( - compressionNone payloadFormat = iota // no compression - compressionMade -) - -// parser reads complete gRPC messages from the underlying reader. -type parser struct { - // r is the underlying reader. - // See the comment on recvMsg for the permissible - // error types. - r io.Reader - - // The header of a gRPC message. Find more detail - // at http://www.grpc.io/docs/guides/wire.html. - header [5]byte -} - -// recvMsg reads a complete gRPC message from the stream. -// -// It returns the message and its payload (compression/encoding) -// format. The caller owns the returned msg memory. -// -// If there is an error, possible values are: -// * io.EOF, when no messages remain -// * io.ErrUnexpectedEOF -// * of type transport.ConnectionError -// * of type transport.StreamError -// No other error values or types must be returned, which also means -// that the underlying io.Reader must not return an incompatible -// error. -func (p *parser) recvMsg(maxMsgSize int) (pf payloadFormat, msg []byte, err error) { - if _, err := io.ReadFull(p.r, p.header[:]); err != nil { - return 0, nil, err - } - - pf = payloadFormat(p.header[0]) - length := binary.BigEndian.Uint32(p.header[1:]) - - if length == 0 { - return pf, nil, nil - } - if length > uint32(maxMsgSize) { - return 0, nil, Errorf(codes.Internal, "grpc: received message length %d exceeding the max size %d", length, maxMsgSize) - } - // TODO(bradfitz,zhaoq): garbage. reuse buffer after proto decoding instead - // of making it for each message: - msg = make([]byte, int(length)) - if _, err := io.ReadFull(p.r, msg); err != nil { - if err == io.EOF { - err = io.ErrUnexpectedEOF - } - return 0, nil, err - } - return pf, msg, nil -} - -// encode serializes msg and prepends the message header. If msg is nil, it -// generates the message header of 0 message length. -func encode(c Codec, msg interface{}, cp Compressor, cbuf *bytes.Buffer) ([]byte, error) { - var b []byte - var length uint - if msg != nil { - var err error - // TODO(zhaoq): optimize to reduce memory alloc and copying. 
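Header, Trailer and FailFast above are consumed as variadic CallOptions on generated client methods. In the sketch below, the helloClient interface and its SayHello method are hypothetical stand-ins for generated stub code; only the grpc.* identifiers come from the package shown in this diff.

package callopts

import (
	"log"

	"golang.org/x/net/context"
	"google.golang.org/grpc"
	"google.golang.org/grpc/metadata"
)

// helloClient stands in for a generated client stub.
type helloClient interface {
	SayHello(ctx context.Context, name string, opts ...grpc.CallOption) (string, error)
}

func call(ctx context.Context, c helloClient) {
	var header, trailer metadata.MD
	reply, err := c.SayHello(ctx, "demo",
		grpc.Header(&header),   // capture the response header metadata
		grpc.Trailer(&trailer), // capture the trailer metadata
		grpc.FailFast(false),   // block until a connection is available instead of failing fast
	)
	if err != nil {
		log.Printf("SayHello failed: %v", err)
		return
	}
	log.Printf("reply=%q header=%v trailer=%v", reply, header, trailer)
}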
- b, err = c.Marshal(msg) - if err != nil { - return nil, err - } - if cp != nil { - if err := cp.Do(cbuf, b); err != nil { - return nil, err - } - b = cbuf.Bytes() - } - length = uint(len(b)) - } - if length > math.MaxUint32 { - return nil, Errorf(codes.InvalidArgument, "grpc: message too large (%d bytes)", length) - } - - const ( - payloadLen = 1 - sizeLen = 4 - ) - - var buf = make([]byte, payloadLen+sizeLen+len(b)) - - // Write payload format - if cp == nil { - buf[0] = byte(compressionNone) - } else { - buf[0] = byte(compressionMade) - } - // Write length of b into buf - binary.BigEndian.PutUint32(buf[1:], uint32(length)) - // Copy encoded msg to buf - copy(buf[5:], b) - - return buf, nil -} - -func checkRecvPayload(pf payloadFormat, recvCompress string, dc Decompressor) error { - switch pf { - case compressionNone: - case compressionMade: - if dc == nil || recvCompress != dc.Type() { - return transport.StreamErrorf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress) - } - default: - return transport.StreamErrorf(codes.Internal, "grpc: received unexpected payload format %d", pf) - } - return nil -} - -func recv(p *parser, c Codec, s *transport.Stream, dc Decompressor, m interface{}, maxMsgSize int) error { - pf, d, err := p.recvMsg(maxMsgSize) - if err != nil { - return err - } - if err := checkRecvPayload(pf, s.RecvCompress(), dc); err != nil { - return err - } - if pf == compressionMade { - d, err = dc.Do(bytes.NewReader(d)) - if err != nil { - return Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err) - } - } - if len(d) > maxMsgSize { - // TODO: Revisit the error code. Currently keep it consistent with java - // implementation. - return Errorf(codes.Internal, "grpc: received a message of %d bytes exceeding %d limit", len(d), maxMsgSize) - } - if err := c.Unmarshal(d, m); err != nil { - return Errorf(codes.Internal, "grpc: failed to unmarshal the received message %v", err) - } - return nil -} - -// rpcError defines the status from an RPC. -type rpcError struct { - code codes.Code - desc string -} - -func (e *rpcError) Error() string { - return fmt.Sprintf("rpc error: code = %d desc = %s", e.code, e.desc) -} - -// Code returns the error code for err if it was produced by the rpc system. -// Otherwise, it returns codes.Unknown. -func Code(err error) codes.Code { - if err == nil { - return codes.OK - } - if e, ok := err.(*rpcError); ok { - return e.code - } - return codes.Unknown -} - -// ErrorDesc returns the error description of err if it was produced by the rpc system. -// Otherwise, it returns err.Error() or empty string when err is nil. -func ErrorDesc(err error) string { - if err == nil { - return "" - } - if e, ok := err.(*rpcError); ok { - return e.desc - } - return err.Error() -} - -// Errorf returns an error containing an error code and a description; -// Errorf returns nil if c is OK. -func Errorf(c codes.Code, format string, a ...interface{}) error { - if c == codes.OK { - return nil - } - return &rpcError{ - code: c, - desc: fmt.Sprintf(format, a...), - } -} - -// toRPCErr converts an error into a rpcError. 
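recvMsg and encode above both revolve around the same 5-byte message header: one compressed-flag byte followed by a big-endian uint32 payload length. The stdlib-only sketch below reproduces that framing for illustration; it is not the library code itself.

package main

import (
	"encoding/binary"
	"fmt"
)

// frame prepends the 5-byte gRPC message header that recvMsg reads back.
func frame(payload []byte, compressed bool) []byte {
	buf := make([]byte, 5+len(payload))
	if compressed {
		buf[0] = 1 // compressionMade
	}
	binary.BigEndian.PutUint32(buf[1:5], uint32(len(payload)))
	copy(buf[5:], payload)
	return buf
}

func main() {
	msg := frame([]byte("hello"), false)
	fmt.Printf("flag=%d length=%d body=%q\n",
		msg[0], binary.BigEndian.Uint32(msg[1:5]), msg[5:])
}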
-func toRPCErr(err error) error { - switch e := err.(type) { - case *rpcError: - return err - case transport.StreamError: - return &rpcError{ - code: e.Code, - desc: e.Desc, - } - case transport.ConnectionError: - return &rpcError{ - code: codes.Internal, - desc: e.Desc, - } - default: - switch err { - case context.DeadlineExceeded: - return &rpcError{ - code: codes.DeadlineExceeded, - desc: err.Error(), - } - case context.Canceled: - return &rpcError{ - code: codes.Canceled, - desc: err.Error(), - } - case ErrClientConnClosing: - return &rpcError{ - code: codes.FailedPrecondition, - desc: err.Error(), - } - } - - } - return Errorf(codes.Unknown, "%v", err) -} - -// convertCode converts a standard Go error into its canonical code. Note that -// this is only used to translate the error returned by the server applications. -func convertCode(err error) codes.Code { - switch err { - case nil: - return codes.OK - case io.EOF: - return codes.OutOfRange - case io.ErrClosedPipe, io.ErrNoProgress, io.ErrShortBuffer, io.ErrShortWrite, io.ErrUnexpectedEOF: - return codes.FailedPrecondition - case os.ErrInvalid: - return codes.InvalidArgument - case context.Canceled: - return codes.Canceled - case context.DeadlineExceeded: - return codes.DeadlineExceeded - } - switch { - case os.IsExist(err): - return codes.AlreadyExists - case os.IsNotExist(err): - return codes.NotFound - case os.IsPermission(err): - return codes.PermissionDenied - } - return codes.Unknown -} - -// SupportPackageIsVersion3 is referenced from generated protocol buffer files -// to assert that that code is compatible with this version of the grpc package. -// -// This constant may be renamed in the future if a change in the generated code -// requires a synchronised update of grpc-go and protoc-gen-go. This constant -// should not be referenced from any other code. -const SupportPackageIsVersion3 = true diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go deleted file mode 100644 index b2a825ad0b..0000000000 --- a/vendor/google.golang.org/grpc/server.go +++ /dev/null @@ -1,897 +0,0 @@ -/* - * - * Copyright 2014, Google Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
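Code and ErrorDesc above exist so callers can branch on canonical codes instead of matching error strings. A minimal consumer-side sketch, where doRPC stands in for any call that returns a gRPC error:

package rpcerrors

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
)

// classify switches on the canonical code of an RPC error.
func classify(doRPC func() error) {
	err := doRPC()
	switch grpc.Code(err) {
	case codes.OK:
		log.Print("success")
	case codes.NotFound:
		log.Print("resource does not exist")
	case codes.DeadlineExceeded:
		log.Print("deadline exceeded; consider a longer timeout")
	default:
		log.Printf("rpc failed: code=%v desc=%q", grpc.Code(err), grpc.ErrorDesc(err))
	}
}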
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -package grpc - -import ( - "bytes" - "errors" - "fmt" - "io" - "net" - "net/http" - "reflect" - "runtime" - "strings" - "sync" - "time" - - "golang.org/x/net/context" - "golang.org/x/net/http2" - "golang.org/x/net/trace" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/internal" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/transport" -) - -type methodHandler func(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor UnaryServerInterceptor) (interface{}, error) - -// MethodDesc represents an RPC service's method specification. -type MethodDesc struct { - MethodName string - Handler methodHandler -} - -// ServiceDesc represents an RPC service's specification. -type ServiceDesc struct { - ServiceName string - // The pointer to the service interface. Used to check whether the user - // provided implementation satisfies the interface requirements. - HandlerType interface{} - Methods []MethodDesc - Streams []StreamDesc - Metadata interface{} -} - -// service consists of the information of the server serving this service and -// the methods in this service. -type service struct { - server interface{} // the server for service methods - md map[string]*MethodDesc - sd map[string]*StreamDesc - mdata interface{} -} - -// Server is a gRPC server to serve RPC requests. -type Server struct { - opts options - - mu sync.Mutex // guards following - lis map[net.Listener]bool - conns map[io.Closer]bool - drain bool - // A CondVar to let GracefulStop() blocks until all the pending RPCs are finished - // and all the transport goes away. - cv *sync.Cond - m map[string]*service // service name -> service info - events trace.EventLog -} - -type options struct { - creds credentials.TransportCredentials - codec Codec - cp Compressor - dc Decompressor - maxMsgSize int - unaryInt UnaryServerInterceptor - streamInt StreamServerInterceptor - maxConcurrentStreams uint32 - useHandlerImpl bool // use http.Handler-based server -} - -var defaultMaxMsgSize = 1024 * 1024 * 4 // use 4MB as the default message size limit - -// A ServerOption sets options. -type ServerOption func(*options) - -// CustomCodec returns a ServerOption that sets a codec for message marshaling and unmarshaling. -func CustomCodec(codec Codec) ServerOption { - return func(o *options) { - o.codec = codec - } -} - -// RPCCompressor returns a ServerOption that sets a compressor for outbound messages. -func RPCCompressor(cp Compressor) ServerOption { - return func(o *options) { - o.cp = cp - } -} - -// RPCDecompressor returns a ServerOption that sets a decompressor for inbound messages. -func RPCDecompressor(dc Decompressor) ServerOption { - return func(o *options) { - o.dc = dc - } -} - -// MaxMsgSize returns a ServerOption to set the max message size in bytes for inbound mesages. -// If this is not set, gRPC uses the default 4MB. 
-func MaxMsgSize(m int) ServerOption { - return func(o *options) { - o.maxMsgSize = m - } -} - -// MaxConcurrentStreams returns a ServerOption that will apply a limit on the number -// of concurrent streams to each ServerTransport. -func MaxConcurrentStreams(n uint32) ServerOption { - return func(o *options) { - o.maxConcurrentStreams = n - } -} - -// Creds returns a ServerOption that sets credentials for server connections. -func Creds(c credentials.TransportCredentials) ServerOption { - return func(o *options) { - o.creds = c - } -} - -// UnaryInterceptor returns a ServerOption that sets the UnaryServerInterceptor for the -// server. Only one unary interceptor can be installed. The construction of multiple -// interceptors (e.g., chaining) can be implemented at the caller. -func UnaryInterceptor(i UnaryServerInterceptor) ServerOption { - return func(o *options) { - if o.unaryInt != nil { - panic("The unary server interceptor has been set.") - } - o.unaryInt = i - } -} - -// StreamInterceptor returns a ServerOption that sets the StreamServerInterceptor for the -// server. Only one stream interceptor can be installed. -func StreamInterceptor(i StreamServerInterceptor) ServerOption { - return func(o *options) { - if o.streamInt != nil { - panic("The stream server interceptor has been set.") - } - o.streamInt = i - } -} - -// NewServer creates a gRPC server which has no service registered and has not -// started to accept requests yet. -func NewServer(opt ...ServerOption) *Server { - var opts options - opts.maxMsgSize = defaultMaxMsgSize - for _, o := range opt { - o(&opts) - } - if opts.codec == nil { - // Set the default codec. - opts.codec = protoCodec{} - } - s := &Server{ - lis: make(map[net.Listener]bool), - opts: opts, - conns: make(map[io.Closer]bool), - m: make(map[string]*service), - } - s.cv = sync.NewCond(&s.mu) - if EnableTracing { - _, file, line, _ := runtime.Caller(1) - s.events = trace.NewEventLog("grpc.Server", fmt.Sprintf("%s:%d", file, line)) - } - return s -} - -// printf records an event in s's event log, unless s has been stopped. -// REQUIRES s.mu is held. -func (s *Server) printf(format string, a ...interface{}) { - if s.events != nil { - s.events.Printf(format, a...) - } -} - -// errorf records an error in s's event log, unless s has been stopped. -// REQUIRES s.mu is held. -func (s *Server) errorf(format string, a ...interface{}) { - if s.events != nil { - s.events.Errorf(format, a...) - } -} - -// RegisterService register a service and its implementation to the gRPC -// server. Called from the IDL generated code. This must be called before -// invoking Serve. 
-func (s *Server) RegisterService(sd *ServiceDesc, ss interface{}) { - ht := reflect.TypeOf(sd.HandlerType).Elem() - st := reflect.TypeOf(ss) - if !st.Implements(ht) { - grpclog.Fatalf("grpc: Server.RegisterService found the handler of type %v that does not satisfy %v", st, ht) - } - s.register(sd, ss) -} - -func (s *Server) register(sd *ServiceDesc, ss interface{}) { - s.mu.Lock() - defer s.mu.Unlock() - s.printf("RegisterService(%q)", sd.ServiceName) - if _, ok := s.m[sd.ServiceName]; ok { - grpclog.Fatalf("grpc: Server.RegisterService found duplicate service registration for %q", sd.ServiceName) - } - srv := &service{ - server: ss, - md: make(map[string]*MethodDesc), - sd: make(map[string]*StreamDesc), - mdata: sd.Metadata, - } - for i := range sd.Methods { - d := &sd.Methods[i] - srv.md[d.MethodName] = d - } - for i := range sd.Streams { - d := &sd.Streams[i] - srv.sd[d.StreamName] = d - } - s.m[sd.ServiceName] = srv -} - -// MethodInfo contains the information of an RPC including its method name and type. -type MethodInfo struct { - // Name is the method name only, without the service name or package name. - Name string - // IsClientStream indicates whether the RPC is a client streaming RPC. - IsClientStream bool - // IsServerStream indicates whether the RPC is a server streaming RPC. - IsServerStream bool -} - -// ServiceInfo contains unary RPC method info, streaming RPC methid info and metadata for a service. -type ServiceInfo struct { - Methods []MethodInfo - // Metadata is the metadata specified in ServiceDesc when registering service. - Metadata interface{} -} - -// GetServiceInfo returns a map from service names to ServiceInfo. -// Service names include the package names, in the form of .. -func (s *Server) GetServiceInfo() map[string]ServiceInfo { - ret := make(map[string]ServiceInfo) - for n, srv := range s.m { - methods := make([]MethodInfo, 0, len(srv.md)+len(srv.sd)) - for m := range srv.md { - methods = append(methods, MethodInfo{ - Name: m, - IsClientStream: false, - IsServerStream: false, - }) - } - for m, d := range srv.sd { - methods = append(methods, MethodInfo{ - Name: m, - IsClientStream: d.ClientStreams, - IsServerStream: d.ServerStreams, - }) - } - - ret[n] = ServiceInfo{ - Methods: methods, - Metadata: srv.mdata, - } - } - return ret -} - -var ( - // ErrServerStopped indicates that the operation is now illegal because of - // the server being stopped. - ErrServerStopped = errors.New("grpc: the server has been stopped") -) - -func (s *Server) useTransportAuthenticator(rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) { - if s.opts.creds == nil { - return rawConn, nil, nil - } - return s.opts.creds.ServerHandshake(rawConn) -} - -// Serve accepts incoming connections on the listener lis, creating a new -// ServerTransport and service goroutine for each. The service goroutines -// read gRPC requests and then call the registered handlers to reply to them. -// Service returns when lis.Accept fails. lis will be closed when -// this method returns. 
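Putting the ServerOption constructors, NewServer, RegisterService and Serve pieces above together, a server is typically assembled as below. The port is illustrative and the generated registration call is hypothetical, hence commented out.

package main

import (
	"log"
	"net"

	"google.golang.org/grpc"
)

func main() {
	lis, err := net.Listen("tcp", ":50051") // port is illustrative
	if err != nil {
		log.Fatalf("listen: %v", err)
	}
	s := grpc.NewServer(
		grpc.MaxMsgSize(8*1024*1024),                 // raise the 4MB default for inbound messages
		grpc.MaxConcurrentStreams(128),               // cap streams per transport
		grpc.RPCCompressor(grpc.NewGZIPCompressor()), // gzip outbound messages
		grpc.RPCDecompressor(grpc.NewGZIPDecompressor()),
	)
	// pb.RegisterGreeterServer(s, &greeterImpl{}) // hypothetical generated registration
	if err := s.Serve(lis); err != nil {
		log.Fatalf("serve: %v", err)
	}
}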
-func (s *Server) Serve(lis net.Listener) error { - s.mu.Lock() - s.printf("serving") - if s.lis == nil { - s.mu.Unlock() - lis.Close() - return ErrServerStopped - } - s.lis[lis] = true - s.mu.Unlock() - defer func() { - s.mu.Lock() - if s.lis != nil && s.lis[lis] { - lis.Close() - delete(s.lis, lis) - } - s.mu.Unlock() - }() - for { - rawConn, err := lis.Accept() - if err != nil { - s.mu.Lock() - s.printf("done serving; Accept = %v", err) - s.mu.Unlock() - return err - } - // Start a new goroutine to deal with rawConn - // so we don't stall this Accept loop goroutine. - go s.handleRawConn(rawConn) - } -} - -// handleRawConn is run in its own goroutine and handles a just-accepted -// connection that has not had any I/O performed on it yet. -func (s *Server) handleRawConn(rawConn net.Conn) { - conn, authInfo, err := s.useTransportAuthenticator(rawConn) - if err != nil { - s.mu.Lock() - s.errorf("ServerHandshake(%q) failed: %v", rawConn.RemoteAddr(), err) - s.mu.Unlock() - grpclog.Printf("grpc: Server.Serve failed to complete security handshake from %q: %v", rawConn.RemoteAddr(), err) - // If serverHandShake returns ErrConnDispatched, keep rawConn open. - if err != credentials.ErrConnDispatched { - rawConn.Close() - } - return - } - - s.mu.Lock() - if s.conns == nil { - s.mu.Unlock() - conn.Close() - return - } - s.mu.Unlock() - - if s.opts.useHandlerImpl { - s.serveUsingHandler(conn) - } else { - s.serveNewHTTP2Transport(conn, authInfo) - } -} - -// serveNewHTTP2Transport sets up a new http/2 transport (using the -// gRPC http2 server transport in transport/http2_server.go) and -// serves streams on it. -// This is run in its own goroutine (it does network I/O in -// transport.NewServerTransport). -func (s *Server) serveNewHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) { - st, err := transport.NewServerTransport("http2", c, s.opts.maxConcurrentStreams, authInfo) - if err != nil { - s.mu.Lock() - s.errorf("NewServerTransport(%q) failed: %v", c.RemoteAddr(), err) - s.mu.Unlock() - c.Close() - grpclog.Println("grpc: Server.Serve failed to create ServerTransport: ", err) - return - } - if !s.addConn(st) { - st.Close() - return - } - s.serveStreams(st) -} - -func (s *Server) serveStreams(st transport.ServerTransport) { - defer s.removeConn(st) - defer st.Close() - var wg sync.WaitGroup - st.HandleStreams(func(stream *transport.Stream) { - wg.Add(1) - go func() { - defer wg.Done() - s.handleStream(st, stream, s.traceInfo(st, stream)) - }() - }) - wg.Wait() -} - -var _ http.Handler = (*Server)(nil) - -// serveUsingHandler is called from handleRawConn when s is configured -// to handle requests via the http.Handler interface. It sets up a -// net/http.Server to handle the just-accepted conn. The http.Server -// is configured to route all incoming requests (all HTTP/2 streams) -// to ServeHTTP, which creates a new ServerTransport for each stream. -// serveUsingHandler blocks until conn closes. -// -// This codepath is only used when Server.TestingUseHandlerImpl has -// been configured. This lets the end2end tests exercise the ServeHTTP -// method as one of the environment types. -// -// conn is the *tls.Conn that's already been authenticated. 
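The var _ http.Handler assertion above means a *grpc.Server can also be mounted in an ordinary net/http server via the ServeHTTP path that follows; that path requires HTTP/2 over TLS. A sketch with placeholder certificate paths and a hypothetical registration call:

package main

import (
	"log"
	"net/http"

	"google.golang.org/grpc"
)

func main() {
	grpcServer := grpc.NewServer()
	// pb.RegisterGreeterServer(grpcServer, &greeterImpl{}) // hypothetical

	// *grpc.Server is used directly as the http.Handler.
	httpServer := &http.Server{
		Addr:    ":8443",
		Handler: grpcServer,
	}
	log.Fatal(httpServer.ListenAndServeTLS("server.crt", "server.key"))
}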
-func (s *Server) serveUsingHandler(conn net.Conn) { - if !s.addConn(conn) { - conn.Close() - return - } - defer s.removeConn(conn) - h2s := &http2.Server{ - MaxConcurrentStreams: s.opts.maxConcurrentStreams, - } - h2s.ServeConn(conn, &http2.ServeConnOpts{ - Handler: s, - }) -} - -func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { - st, err := transport.NewServerHandlerTransport(w, r) - if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - if !s.addConn(st) { - st.Close() - return - } - defer s.removeConn(st) - s.serveStreams(st) -} - -// traceInfo returns a traceInfo and associates it with stream, if tracing is enabled. -// If tracing is not enabled, it returns nil. -func (s *Server) traceInfo(st transport.ServerTransport, stream *transport.Stream) (trInfo *traceInfo) { - if !EnableTracing { - return nil - } - trInfo = &traceInfo{ - tr: trace.New("grpc.Recv."+methodFamily(stream.Method()), stream.Method()), - } - trInfo.firstLine.client = false - trInfo.firstLine.remoteAddr = st.RemoteAddr() - stream.TraceContext(trInfo.tr) - if dl, ok := stream.Context().Deadline(); ok { - trInfo.firstLine.deadline = dl.Sub(time.Now()) - } - return trInfo -} - -func (s *Server) addConn(c io.Closer) bool { - s.mu.Lock() - defer s.mu.Unlock() - if s.conns == nil || s.drain { - return false - } - s.conns[c] = true - return true -} - -func (s *Server) removeConn(c io.Closer) { - s.mu.Lock() - defer s.mu.Unlock() - if s.conns != nil { - delete(s.conns, c) - s.cv.Signal() - } -} - -func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg interface{}, cp Compressor, opts *transport.Options) error { - var cbuf *bytes.Buffer - if cp != nil { - cbuf = new(bytes.Buffer) - } - p, err := encode(s.opts.codec, msg, cp, cbuf) - if err != nil { - // This typically indicates a fatal issue (e.g., memory - // corruption or hardware faults) the application program - // cannot handle. - // - // TODO(zhaoq): There exist other options also such as only closing the - // faulty stream locally and remotely (Other streams can keep going). Find - // the optimal option. - grpclog.Fatalf("grpc: Server failed to encode response %v", err) - } - return t.Write(stream, p, opts) -} - -func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, md *MethodDesc, trInfo *traceInfo) (err error) { - if trInfo != nil { - defer trInfo.tr.Finish() - trInfo.firstLine.client = false - trInfo.tr.LazyLog(&trInfo.firstLine, false) - defer func() { - if err != nil && err != io.EOF { - trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) - trInfo.tr.SetError() - } - }() - } - if s.opts.cp != nil { - // NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686. - stream.SetSendCompress(s.opts.cp.Type()) - } - p := &parser{r: stream} - for { - pf, req, err := p.recvMsg(s.opts.maxMsgSize) - if err == io.EOF { - // The entire stream is done (for unary RPC only). - return err - } - if err == io.ErrUnexpectedEOF { - err = transport.StreamError{Code: codes.Internal, Desc: "io.ErrUnexpectedEOF"} - } - if err != nil { - switch err := err.(type) { - case *rpcError: - if err := t.WriteStatus(stream, err.code, err.desc); err != nil { - grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", err) - } - case transport.ConnectionError: - // Nothing to do here. 
- case transport.StreamError: - if err := t.WriteStatus(stream, err.Code, err.Desc); err != nil { - grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", err) - } - default: - panic(fmt.Sprintf("grpc: Unexpected error (%T) from recvMsg: %v", err, err)) - } - return err - } - - if err := checkRecvPayload(pf, stream.RecvCompress(), s.opts.dc); err != nil { - switch err := err.(type) { - case transport.StreamError: - if err := t.WriteStatus(stream, err.Code, err.Desc); err != nil { - grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", err) - } - default: - if err := t.WriteStatus(stream, codes.Internal, err.Error()); err != nil { - grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", err) - } - - } - return err - } - statusCode := codes.OK - statusDesc := "" - df := func(v interface{}) error { - if pf == compressionMade { - var err error - req, err = s.opts.dc.Do(bytes.NewReader(req)) - if err != nil { - if err := t.WriteStatus(stream, codes.Internal, err.Error()); err != nil { - grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", err) - } - return err - } - } - if len(req) > s.opts.maxMsgSize { - // TODO: Revisit the error code. Currently keep it consistent with - // java implementation. - statusCode = codes.Internal - statusDesc = fmt.Sprintf("grpc: server received a message of %d bytes exceeding %d limit", len(req), s.opts.maxMsgSize) - } - if err := s.opts.codec.Unmarshal(req, v); err != nil { - return err - } - if trInfo != nil { - trInfo.tr.LazyLog(&payload{sent: false, msg: v}, true) - } - return nil - } - reply, appErr := md.Handler(srv.server, stream.Context(), df, s.opts.unaryInt) - if appErr != nil { - if err, ok := appErr.(*rpcError); ok { - statusCode = err.code - statusDesc = err.desc - } else { - statusCode = convertCode(appErr) - statusDesc = appErr.Error() - } - if trInfo != nil && statusCode != codes.OK { - trInfo.tr.LazyLog(stringer(statusDesc), true) - trInfo.tr.SetError() - } - if err := t.WriteStatus(stream, statusCode, statusDesc); err != nil { - grpclog.Printf("grpc: Server.processUnaryRPC failed to write status: %v", err) - return err - } - return nil - } - if trInfo != nil { - trInfo.tr.LazyLog(stringer("OK"), false) - } - opts := &transport.Options{ - Last: true, - Delay: false, - } - if err := s.sendResponse(t, stream, reply, s.opts.cp, opts); err != nil { - switch err := err.(type) { - case transport.ConnectionError: - // Nothing to do here. 
- case transport.StreamError: - statusCode = err.Code - statusDesc = err.Desc - default: - statusCode = codes.Unknown - statusDesc = err.Error() - } - return err - } - if trInfo != nil { - trInfo.tr.LazyLog(&payload{sent: true, msg: reply}, true) - } - return t.WriteStatus(stream, statusCode, statusDesc) - } -} - -func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, sd *StreamDesc, trInfo *traceInfo) (err error) { - if s.opts.cp != nil { - stream.SetSendCompress(s.opts.cp.Type()) - } - ss := &serverStream{ - t: t, - s: stream, - p: &parser{r: stream}, - codec: s.opts.codec, - cp: s.opts.cp, - dc: s.opts.dc, - maxMsgSize: s.opts.maxMsgSize, - trInfo: trInfo, - } - if ss.cp != nil { - ss.cbuf = new(bytes.Buffer) - } - if trInfo != nil { - trInfo.tr.LazyLog(&trInfo.firstLine, false) - defer func() { - ss.mu.Lock() - if err != nil && err != io.EOF { - ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) - ss.trInfo.tr.SetError() - } - ss.trInfo.tr.Finish() - ss.trInfo.tr = nil - ss.mu.Unlock() - }() - } - var appErr error - if s.opts.streamInt == nil { - appErr = sd.Handler(srv.server, ss) - } else { - info := &StreamServerInfo{ - FullMethod: stream.Method(), - IsClientStream: sd.ClientStreams, - IsServerStream: sd.ServerStreams, - } - appErr = s.opts.streamInt(srv.server, ss, info, sd.Handler) - } - if appErr != nil { - if err, ok := appErr.(*rpcError); ok { - ss.statusCode = err.code - ss.statusDesc = err.desc - } else if err, ok := appErr.(transport.StreamError); ok { - ss.statusCode = err.Code - ss.statusDesc = err.Desc - } else { - ss.statusCode = convertCode(appErr) - ss.statusDesc = appErr.Error() - } - } - if trInfo != nil { - ss.mu.Lock() - if ss.statusCode != codes.OK { - ss.trInfo.tr.LazyLog(stringer(ss.statusDesc), true) - ss.trInfo.tr.SetError() - } else { - ss.trInfo.tr.LazyLog(stringer("OK"), false) - } - ss.mu.Unlock() - } - return t.WriteStatus(ss.s, ss.statusCode, ss.statusDesc) - -} - -func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream, trInfo *traceInfo) { - sm := stream.Method() - if sm != "" && sm[0] == '/' { - sm = sm[1:] - } - pos := strings.LastIndex(sm, "/") - if pos == -1 { - if trInfo != nil { - trInfo.tr.LazyLog(&fmtStringer{"Malformed method name %q", []interface{}{sm}}, true) - trInfo.tr.SetError() - } - if err := t.WriteStatus(stream, codes.InvalidArgument, fmt.Sprintf("malformed method name: %q", stream.Method())); err != nil { - if trInfo != nil { - trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) - trInfo.tr.SetError() - } - grpclog.Printf("grpc: Server.handleStream failed to write status: %v", err) - } - if trInfo != nil { - trInfo.tr.Finish() - } - return - } - service := sm[:pos] - method := sm[pos+1:] - srv, ok := s.m[service] - if !ok { - if trInfo != nil { - trInfo.tr.LazyLog(&fmtStringer{"Unknown service %v", []interface{}{service}}, true) - trInfo.tr.SetError() - } - if err := t.WriteStatus(stream, codes.Unimplemented, fmt.Sprintf("unknown service %v", service)); err != nil { - if trInfo != nil { - trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) - trInfo.tr.SetError() - } - grpclog.Printf("grpc: Server.handleStream failed to write status: %v", err) - } - if trInfo != nil { - trInfo.tr.Finish() - } - return - } - // Unary RPC or Streaming RPC? 
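processStreamingRPC above invokes the installed stream interceptor as streamInt(srv, ss, info, handler). An interceptor therefore looks like the sketch below, which simply times each streaming RPC; the signature is inferred from that call site and from the StreamInterceptor option, so treat it as a sketch rather than canonical.

package interceptors

import (
	"log"
	"time"

	"google.golang.org/grpc"
)

// logStreams times every streaming RPC and logs the outcome.
func logStreams(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
	start := time.Now()
	err := handler(srv, ss)
	log.Printf("stream %s (client-stream=%v server-stream=%v) took %v err=%v",
		info.FullMethod, info.IsClientStream, info.IsServerStream, time.Since(start), err)
	return err
}

// Installed once at construction time, e.g.:
//
//	s := grpc.NewServer(grpc.StreamInterceptor(logStreams))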
- if md, ok := srv.md[method]; ok { - s.processUnaryRPC(t, stream, srv, md, trInfo) - return - } - if sd, ok := srv.sd[method]; ok { - s.processStreamingRPC(t, stream, srv, sd, trInfo) - return - } - if trInfo != nil { - trInfo.tr.LazyLog(&fmtStringer{"Unknown method %v", []interface{}{method}}, true) - trInfo.tr.SetError() - } - if err := t.WriteStatus(stream, codes.Unimplemented, fmt.Sprintf("unknown method %v", method)); err != nil { - if trInfo != nil { - trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) - trInfo.tr.SetError() - } - grpclog.Printf("grpc: Server.handleStream failed to write status: %v", err) - } - if trInfo != nil { - trInfo.tr.Finish() - } -} - -// Stop stops the gRPC server. It immediately closes all open -// connections and listeners. -// It cancels all active RPCs on the server side and the corresponding -// pending RPCs on the client side will get notified by connection -// errors. -func (s *Server) Stop() { - s.mu.Lock() - listeners := s.lis - s.lis = nil - st := s.conns - s.conns = nil - // interrupt GracefulStop if Stop and GracefulStop are called concurrently. - s.cv.Signal() - s.mu.Unlock() - - for lis := range listeners { - lis.Close() - } - for c := range st { - c.Close() - } - - s.mu.Lock() - if s.events != nil { - s.events.Finish() - s.events = nil - } - s.mu.Unlock() -} - -// GracefulStop stops the gRPC server gracefully. It stops the server to accept new -// connections and RPCs and blocks until all the pending RPCs are finished. -func (s *Server) GracefulStop() { - s.mu.Lock() - defer s.mu.Unlock() - if s.drain == true || s.conns == nil { - return - } - s.drain = true - for lis := range s.lis { - lis.Close() - } - s.lis = nil - for c := range s.conns { - c.(transport.ServerTransport).Drain() - } - for len(s.conns) != 0 { - s.cv.Wait() - } - s.conns = nil - if s.events != nil { - s.events.Finish() - s.events = nil - } -} - -func init() { - internal.TestingCloseConns = func(arg interface{}) { - arg.(*Server).testingCloseConns() - } - internal.TestingUseHandlerImpl = func(arg interface{}) { - arg.(*Server).opts.useHandlerImpl = true - } -} - -// testingCloseConns closes all existing transports but keeps s.lis -// accepting new connections. -func (s *Server) testingCloseConns() { - s.mu.Lock() - for c := range s.conns { - c.Close() - delete(s.conns, c) - } - s.mu.Unlock() -} - -// SendHeader sends header metadata. It may be called at most once from a unary -// RPC handler. The ctx is the RPC handler's Context or one derived from it. -func SendHeader(ctx context.Context, md metadata.MD) error { - if md.Len() == 0 { - return nil - } - stream, ok := transport.StreamFromContext(ctx) - if !ok { - return fmt.Errorf("grpc: failed to fetch the stream from the context %v", ctx) - } - t := stream.ServerTransport() - if t == nil { - grpclog.Fatalf("grpc: SendHeader: %v has no ServerTransport to send header metadata.", stream) - } - return t.WriteHeader(stream, md) -} - -// SetTrailer sets the trailer metadata that will be sent when an RPC returns. -// It may be called at most once from a unary RPC handler. The ctx is the RPC -// handler's Context or one derived from it. 
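SendHeader above and SetTrailer just below are called from inside a unary handler with the handler's own context. The sketch shows the typical ordering (header before the reply is produced, trailer any time before the handler returns); the message types, metadata keys and timing wrapper are illustrative.

package handlers

import (
	"time"

	"golang.org/x/net/context"
	"google.golang.org/grpc"
	"google.golang.org/grpc/metadata"
)

// EchoRequest and EchoReply are hypothetical stand-ins for generated
// protobuf types; only the grpc and metadata calls are real.
type EchoRequest struct{ Msg string }
type EchoReply struct{ Msg string }

// Echo sends a header before building the reply and attaches a trailer
// that is delivered with the final RPC status.
func Echo(ctx context.Context, req *EchoRequest) (*EchoReply, error) {
	if err := grpc.SendHeader(ctx, metadata.Pairs("x-server", "demo")); err != nil {
		return nil, err
	}
	start := time.Now()
	reply := &EchoReply{Msg: req.Msg}
	grpc.SetTrailer(ctx, metadata.Pairs("x-elapsed", time.Since(start).String()))
	return reply, nil
}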
-func SetTrailer(ctx context.Context, md metadata.MD) error { - if md.Len() == 0 { - return nil - } - stream, ok := transport.StreamFromContext(ctx) - if !ok { - return fmt.Errorf("grpc: failed to fetch the stream from the context %v", ctx) - } - return stream.SetTrailer(md) -} diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go deleted file mode 100644 index 51df3f01da..0000000000 --- a/vendor/google.golang.org/grpc/stream.go +++ /dev/null @@ -1,493 +0,0 @@ -/* - * - * Copyright 2014, Google Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -package grpc - -import ( - "bytes" - "errors" - "io" - "math" - "sync" - "time" - - "golang.org/x/net/context" - "golang.org/x/net/trace" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/transport" -) - -// StreamHandler defines the handler called by gRPC server to complete the -// execution of a streaming RPC. -type StreamHandler func(srv interface{}, stream ServerStream) error - -// StreamDesc represents a streaming RPC service's method specification. -type StreamDesc struct { - StreamName string - Handler StreamHandler - - // At least one of these is true. - ServerStreams bool - ClientStreams bool -} - -// Stream defines the common interface a client or server stream has to satisfy. -type Stream interface { - // Context returns the context for this stream. - Context() context.Context - // SendMsg blocks until it sends m, the stream is done or the stream - // breaks. - // On error, it aborts the stream and returns an RPC status on client - // side. On server side, it simply returns the error to the caller. - // SendMsg is called by generated code. Also Users can call SendMsg - // directly when it is really needed in their use cases. - SendMsg(m interface{}) error - // RecvMsg blocks until it receives a message or the stream is - // done. On client side, it returns io.EOF when the stream is done. On - // any other error, it aborts the stream and returns an RPC status. 
On - // server side, it simply returns the error to the caller. - RecvMsg(m interface{}) error -} - -// ClientStream defines the interface a client stream has to satisfy. -type ClientStream interface { - // Header returns the header metadata received from the server if there - // is any. It blocks if the metadata is not ready to read. - Header() (metadata.MD, error) - // Trailer returns the trailer metadata from the server, if there is any. - // It must only be called after stream.CloseAndRecv has returned, or - // stream.Recv has returned a non-nil error (including io.EOF). - Trailer() metadata.MD - // CloseSend closes the send direction of the stream. It closes the stream - // when non-nil error is met. - CloseSend() error - Stream -} - -// NewClientStream creates a new Stream for the client side. This is called -// by generated code. -func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) { - var ( - t transport.ClientTransport - s *transport.Stream - put func() - ) - c := defaultCallInfo - for _, o := range opts { - if err := o.before(&c); err != nil { - return nil, toRPCErr(err) - } - } - callHdr := &transport.CallHdr{ - Host: cc.authority, - Method: method, - Flush: desc.ServerStreams && desc.ClientStreams, - } - if cc.dopts.cp != nil { - callHdr.SendCompress = cc.dopts.cp.Type() - } - var trInfo traceInfo - if EnableTracing { - trInfo.tr = trace.New("grpc.Sent."+methodFamily(method), method) - trInfo.firstLine.client = true - if deadline, ok := ctx.Deadline(); ok { - trInfo.firstLine.deadline = deadline.Sub(time.Now()) - } - trInfo.tr.LazyLog(&trInfo.firstLine, false) - ctx = trace.NewContext(ctx, trInfo.tr) - defer func() { - if err != nil { - // Need to call tr.finish() if error is returned. - // Because tr will not be returned to caller. - trInfo.tr.LazyPrintf("RPC: [%v]", err) - trInfo.tr.SetError() - trInfo.tr.Finish() - } - }() - } - gopts := BalancerGetOptions{ - BlockingWait: !c.failFast, - } - for { - t, put, err = cc.getTransport(ctx, gopts) - if err != nil { - // TODO(zhaoq): Probably revisit the error handling. - if _, ok := err.(*rpcError); ok { - return nil, err - } - if err == errConnClosing || err == errConnUnavailable { - if c.failFast { - return nil, Errorf(codes.Unavailable, "%v", err) - } - continue - } - // All the other errors are treated as Internal errors. - return nil, Errorf(codes.Internal, "%v", err) - } - - s, err = t.NewStream(ctx, callHdr) - if err != nil { - if put != nil { - put() - put = nil - } - if _, ok := err.(transport.ConnectionError); ok || err == transport.ErrStreamDrain { - if c.failFast { - return nil, toRPCErr(err) - } - continue - } - return nil, toRPCErr(err) - } - break - } - cs := &clientStream{ - opts: opts, - c: c, - desc: desc, - codec: cc.dopts.codec, - cp: cc.dopts.cp, - dc: cc.dopts.dc, - - put: put, - t: t, - s: s, - p: &parser{r: s}, - - tracing: EnableTracing, - trInfo: trInfo, - } - if cc.dopts.cp != nil { - cs.cbuf = new(bytes.Buffer) - } - // Listen on ctx.Done() to detect cancellation and s.Done() to detect normal termination - // when there is no pending I/O operations on this stream. - go func() { - select { - case <-t.Error(): - // Incur transport error, simply exit. - case <-s.Done(): - // TODO: The trace of the RPC is terminated here when there is no pending - // I/O, which is probably not the optimal solution. 
- if s.StatusCode() == codes.OK { - cs.finish(nil) - } else { - cs.finish(Errorf(s.StatusCode(), "%s", s.StatusDesc())) - } - cs.closeTransportStream(nil) - case <-s.GoAway(): - cs.finish(errConnDrain) - cs.closeTransportStream(errConnDrain) - case <-s.Context().Done(): - err := s.Context().Err() - cs.finish(err) - cs.closeTransportStream(transport.ContextErr(err)) - } - }() - return cs, nil -} - -// clientStream implements a client side Stream. -type clientStream struct { - opts []CallOption - c callInfo - t transport.ClientTransport - s *transport.Stream - p *parser - desc *StreamDesc - codec Codec - cp Compressor - cbuf *bytes.Buffer - dc Decompressor - - tracing bool // set to EnableTracing when the clientStream is created. - - mu sync.Mutex - put func() - closed bool - // trInfo.tr is set when the clientStream is created (if EnableTracing is true), - // and is set to nil when the clientStream's finish method is called. - trInfo traceInfo -} - -func (cs *clientStream) Context() context.Context { - return cs.s.Context() -} - -func (cs *clientStream) Header() (metadata.MD, error) { - m, err := cs.s.Header() - if err != nil { - if _, ok := err.(transport.ConnectionError); !ok { - cs.closeTransportStream(err) - } - } - return m, err -} - -func (cs *clientStream) Trailer() metadata.MD { - return cs.s.Trailer() -} - -func (cs *clientStream) SendMsg(m interface{}) (err error) { - if cs.tracing { - cs.mu.Lock() - if cs.trInfo.tr != nil { - cs.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true) - } - cs.mu.Unlock() - } - defer func() { - if err != nil { - cs.finish(err) - } - if err == nil { - return - } - if err == io.EOF { - // Specialize the process for server streaming. SendMesg is only called - // once when creating the stream object. io.EOF needs to be skipped when - // the rpc is early finished (before the stream object is created.). - // TODO: It is probably better to move this into the generated code. - if !cs.desc.ClientStreams && cs.desc.ServerStreams { - err = nil - } - return - } - if _, ok := err.(transport.ConnectionError); !ok { - cs.closeTransportStream(err) - } - err = toRPCErr(err) - }() - out, err := encode(cs.codec, m, cs.cp, cs.cbuf) - defer func() { - if cs.cbuf != nil { - cs.cbuf.Reset() - } - }() - if err != nil { - return transport.StreamErrorf(codes.Internal, "grpc: %v", err) - } - return cs.t.Write(cs.s, out, &transport.Options{Last: false}) -} - -func (cs *clientStream) RecvMsg(m interface{}) (err error) { - err = recv(cs.p, cs.codec, cs.s, cs.dc, m, math.MaxInt32) - defer func() { - // err != nil indicates the termination of the stream. - if err != nil { - cs.finish(err) - } - }() - if err == nil { - if cs.tracing { - cs.mu.Lock() - if cs.trInfo.tr != nil { - cs.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true) - } - cs.mu.Unlock() - } - if !cs.desc.ClientStreams || cs.desc.ServerStreams { - return - } - // Special handling for client streaming rpc. - err = recv(cs.p, cs.codec, cs.s, cs.dc, m, math.MaxInt32) - cs.closeTransportStream(err) - if err == nil { - return toRPCErr(errors.New("grpc: client streaming protocol violation: get , want ")) - } - if err == io.EOF { - if cs.s.StatusCode() == codes.OK { - cs.finish(err) - return nil - } - return Errorf(cs.s.StatusCode(), "%s", cs.s.StatusDesc()) - } - return toRPCErr(err) - } - if _, ok := err.(transport.ConnectionError); !ok { - cs.closeTransportStream(err) - } - if err == io.EOF { - if cs.s.StatusCode() == codes.OK { - // Returns io.EOF to indicate the end of the stream. 
- return - } - return Errorf(cs.s.StatusCode(), "%s", cs.s.StatusDesc()) - } - return toRPCErr(err) -} - -func (cs *clientStream) CloseSend() (err error) { - err = cs.t.Write(cs.s, nil, &transport.Options{Last: true}) - defer func() { - if err != nil { - cs.finish(err) - } - }() - if err == nil || err == io.EOF { - return nil - } - if _, ok := err.(transport.ConnectionError); !ok { - cs.closeTransportStream(err) - } - err = toRPCErr(err) - return -} - -func (cs *clientStream) closeTransportStream(err error) { - cs.mu.Lock() - if cs.closed { - cs.mu.Unlock() - return - } - cs.closed = true - cs.mu.Unlock() - cs.t.CloseStream(cs.s, err) -} - -func (cs *clientStream) finish(err error) { - cs.mu.Lock() - defer cs.mu.Unlock() - for _, o := range cs.opts { - o.after(&cs.c) - } - if cs.put != nil { - cs.put() - cs.put = nil - } - if !cs.tracing { - return - } - if cs.trInfo.tr != nil { - if err == nil || err == io.EOF { - cs.trInfo.tr.LazyPrintf("RPC: [OK]") - } else { - cs.trInfo.tr.LazyPrintf("RPC: [%v]", err) - cs.trInfo.tr.SetError() - } - cs.trInfo.tr.Finish() - cs.trInfo.tr = nil - } -} - -// ServerStream defines the interface a server stream has to satisfy. -type ServerStream interface { - // SendHeader sends the header metadata. It should not be called - // after SendProto. It fails if called multiple times or if - // called after SendProto. - SendHeader(metadata.MD) error - // SetTrailer sets the trailer metadata which will be sent with the - // RPC status. - SetTrailer(metadata.MD) - Stream -} - -// serverStream implements a server side Stream. -type serverStream struct { - t transport.ServerTransport - s *transport.Stream - p *parser - codec Codec - cp Compressor - dc Decompressor - cbuf *bytes.Buffer - maxMsgSize int - statusCode codes.Code - statusDesc string - trInfo *traceInfo - - mu sync.Mutex // protects trInfo.tr after the service handler runs. 
-} - -func (ss *serverStream) Context() context.Context { - return ss.s.Context() -} - -func (ss *serverStream) SendHeader(md metadata.MD) error { - return ss.t.WriteHeader(ss.s, md) -} - -func (ss *serverStream) SetTrailer(md metadata.MD) { - if md.Len() == 0 { - return - } - ss.s.SetTrailer(md) - return -} - -func (ss *serverStream) SendMsg(m interface{}) (err error) { - defer func() { - if ss.trInfo != nil { - ss.mu.Lock() - if ss.trInfo.tr != nil { - if err == nil { - ss.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true) - } else { - ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) - ss.trInfo.tr.SetError() - } - } - ss.mu.Unlock() - } - }() - out, err := encode(ss.codec, m, ss.cp, ss.cbuf) - defer func() { - if ss.cbuf != nil { - ss.cbuf.Reset() - } - }() - if err != nil { - err = transport.StreamErrorf(codes.Internal, "grpc: %v", err) - return err - } - return ss.t.Write(ss.s, out, &transport.Options{Last: false}) -} - -func (ss *serverStream) RecvMsg(m interface{}) (err error) { - defer func() { - if ss.trInfo != nil { - ss.mu.Lock() - if ss.trInfo.tr != nil { - if err == nil { - ss.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true) - } else if err != io.EOF { - ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) - ss.trInfo.tr.SetError() - } - } - ss.mu.Unlock() - } - }() - return recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxMsgSize) -} diff --git a/vendor/google.golang.org/grpc/trace.go b/vendor/google.golang.org/grpc/trace.go deleted file mode 100644 index f6747e1dfa..0000000000 --- a/vendor/google.golang.org/grpc/trace.go +++ /dev/null @@ -1,119 +0,0 @@ -/* - * - * Copyright 2015, Google Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -package grpc - -import ( - "bytes" - "fmt" - "io" - "net" - "strings" - "time" - - "golang.org/x/net/trace" -) - -// EnableTracing controls whether to trace RPCs using the golang.org/x/net/trace package. -// This should only be set before any RPCs are sent or received by this program. 
-var EnableTracing = true - -// methodFamily returns the trace family for the given method. -// It turns "/pkg.Service/GetFoo" into "pkg.Service". -func methodFamily(m string) string { - m = strings.TrimPrefix(m, "/") // remove leading slash - if i := strings.Index(m, "/"); i >= 0 { - m = m[:i] // remove everything from second slash - } - if i := strings.LastIndex(m, "."); i >= 0 { - m = m[i+1:] // cut down to last dotted component - } - return m -} - -// traceInfo contains tracing information for an RPC. -type traceInfo struct { - tr trace.Trace - firstLine firstLine -} - -// firstLine is the first line of an RPC trace. -type firstLine struct { - client bool // whether this is a client (outgoing) RPC - remoteAddr net.Addr - deadline time.Duration // may be zero -} - -func (f *firstLine) String() string { - var line bytes.Buffer - io.WriteString(&line, "RPC: ") - if f.client { - io.WriteString(&line, "to") - } else { - io.WriteString(&line, "from") - } - fmt.Fprintf(&line, " %v deadline:", f.remoteAddr) - if f.deadline != 0 { - fmt.Fprint(&line, f.deadline) - } else { - io.WriteString(&line, "none") - } - return line.String() -} - -// payload represents an RPC request or response payload. -type payload struct { - sent bool // whether this is an outgoing payload - msg interface{} // e.g. a proto.Message - // TODO(dsymonds): add stringifying info to codec, and limit how much we hold here? -} - -func (p payload) String() string { - if p.sent { - return fmt.Sprintf("sent: %v", p.msg) - } - return fmt.Sprintf("recv: %v", p.msg) -} - -type fmtStringer struct { - format string - a []interface{} -} - -func (f *fmtStringer) String() string { - return fmt.Sprintf(f.format, f.a...) -} - -type stringer string - -func (s stringer) String() string { return string(s) } diff --git a/vendor/google.golang.org/grpc/transport/control.go b/vendor/google.golang.org/grpc/transport/control.go deleted file mode 100644 index 4ef0830b56..0000000000 --- a/vendor/google.golang.org/grpc/transport/control.go +++ /dev/null @@ -1,215 +0,0 @@ -/* - * - * Copyright 2014, Google Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -package transport - -import ( - "fmt" - "sync" - - "golang.org/x/net/http2" -) - -const ( - // The default value of flow control window size in HTTP2 spec. - defaultWindowSize = 65535 - // The initial window size for flow control. - initialWindowSize = defaultWindowSize // for an RPC - initialConnWindowSize = defaultWindowSize * 16 // for a connection -) - -// The following defines various control items which could flow through -// the control buffer of transport. They represent different aspects of -// control tasks, e.g., flow control, settings, streaming resetting, etc. -type windowUpdate struct { - streamID uint32 - increment uint32 -} - -func (*windowUpdate) item() {} - -type settings struct { - ack bool - ss []http2.Setting -} - -func (*settings) item() {} - -type resetStream struct { - streamID uint32 - code http2.ErrCode -} - -func (*resetStream) item() {} - -type goAway struct { -} - -func (*goAway) item() {} - -type flushIO struct { -} - -func (*flushIO) item() {} - -type ping struct { - ack bool - data [8]byte -} - -func (*ping) item() {} - -// quotaPool is a pool which accumulates the quota and sends it to acquire() -// when it is available. -type quotaPool struct { - c chan int - - mu sync.Mutex - quota int -} - -// newQuotaPool creates a quotaPool which has quota q available to consume. -func newQuotaPool(q int) *quotaPool { - qb := "aPool{ - c: make(chan int, 1), - } - if q > 0 { - qb.c <- q - } else { - qb.quota = q - } - return qb -} - -// add adds n to the available quota and tries to send it on acquire. -func (qb *quotaPool) add(n int) { - qb.mu.Lock() - defer qb.mu.Unlock() - qb.quota += n - if qb.quota <= 0 { - return - } - select { - case qb.c <- qb.quota: - qb.quota = 0 - default: - } -} - -// cancel cancels the pending quota sent on acquire, if any. -func (qb *quotaPool) cancel() { - qb.mu.Lock() - defer qb.mu.Unlock() - select { - case n := <-qb.c: - qb.quota += n - default: - } -} - -// reset cancels the pending quota sent on acquired, incremented by v and sends -// it back on acquire. -func (qb *quotaPool) reset(v int) { - qb.mu.Lock() - defer qb.mu.Unlock() - select { - case n := <-qb.c: - qb.quota += n - default: - } - qb.quota += v - if qb.quota <= 0 { - return - } - select { - case qb.c <- qb.quota: - qb.quota = 0 - default: - } -} - -// acquire returns the channel on which available quota amounts are sent. -func (qb *quotaPool) acquire() <-chan int { - return qb.c -} - -// inFlow deals with inbound flow control -type inFlow struct { - // The inbound flow control limit for pending data. - limit uint32 - - mu sync.Mutex - // pendingData is the overall data which have been received but not been - // consumed by applications. - pendingData uint32 - // The amount of data the application has consumed but grpc has not sent - // window update for them. Used to reduce window update frequency. - pendingUpdate uint32 -} - -// onData is invoked when some data frame is received. It updates pendingData. 
-func (f *inFlow) onData(n uint32) error { - f.mu.Lock() - defer f.mu.Unlock() - f.pendingData += n - if f.pendingData+f.pendingUpdate > f.limit { - return fmt.Errorf("received %d-bytes data exceeding the limit %d bytes", f.pendingData+f.pendingUpdate, f.limit) - } - return nil -} - -// onRead is invoked when the application reads the data. It returns the window size -// to be sent to the peer. -func (f *inFlow) onRead(n uint32) uint32 { - f.mu.Lock() - defer f.mu.Unlock() - if f.pendingData == 0 { - return 0 - } - f.pendingData -= n - f.pendingUpdate += n - if f.pendingUpdate >= f.limit/4 { - wu := f.pendingUpdate - f.pendingUpdate = 0 - return wu - } - return 0 -} - -func (f *inFlow) resetPendingData() uint32 { - f.mu.Lock() - defer f.mu.Unlock() - n := f.pendingData - f.pendingData = 0 - return n -} diff --git a/vendor/google.golang.org/grpc/transport/go16.go b/vendor/google.golang.org/grpc/transport/go16.go deleted file mode 100644 index ee1c46bad5..0000000000 --- a/vendor/google.golang.org/grpc/transport/go16.go +++ /dev/null @@ -1,46 +0,0 @@ -// +build go1.6,!go1.7 - -/* - * Copyright 2016, Google Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -package transport - -import ( - "net" - - "golang.org/x/net/context" -) - -// dialContext connects to the address on the named network. -func dialContext(ctx context.Context, network, address string) (net.Conn, error) { - return (&net.Dialer{Cancel: ctx.Done()}).Dial(network, address) -} diff --git a/vendor/google.golang.org/grpc/transport/go17.go b/vendor/google.golang.org/grpc/transport/go17.go deleted file mode 100644 index 356f13ff19..0000000000 --- a/vendor/google.golang.org/grpc/transport/go17.go +++ /dev/null @@ -1,46 +0,0 @@ -// +build go1.7 - -/* - * Copyright 2016, Google Inc. - * All rights reserved. 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -package transport - -import ( - "net" - - "golang.org/x/net/context" -) - -// dialContext connects to the address on the named network. -func dialContext(ctx context.Context, network, address string) (net.Conn, error) { - return (&net.Dialer{}).DialContext(ctx, network, address) -} diff --git a/vendor/google.golang.org/grpc/transport/handler_server.go b/vendor/google.golang.org/grpc/transport/handler_server.go deleted file mode 100644 index f23b2daff5..0000000000 --- a/vendor/google.golang.org/grpc/transport/handler_server.go +++ /dev/null @@ -1,397 +0,0 @@ -/* - * Copyright 2016, Google Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -// This file is the implementation of a gRPC server using HTTP/2 which -// uses the standard Go http2 Server implementation (via the -// http.Handler interface), rather than speaking low-level HTTP/2 -// frames itself. It is the implementation of *grpc.Server.ServeHTTP. - -package transport - -import ( - "errors" - "fmt" - "io" - "net" - "net/http" - "strings" - "sync" - "time" - - "golang.org/x/net/context" - "golang.org/x/net/http2" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/peer" -) - -// NewServerHandlerTransport returns a ServerTransport handling gRPC -// from inside an http.Handler. It requires that the http Server -// supports HTTP/2. -func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request) (ServerTransport, error) { - if r.ProtoMajor != 2 { - return nil, errors.New("gRPC requires HTTP/2") - } - if r.Method != "POST" { - return nil, errors.New("invalid gRPC request method") - } - if !validContentType(r.Header.Get("Content-Type")) { - return nil, errors.New("invalid gRPC request content-type") - } - if _, ok := w.(http.Flusher); !ok { - return nil, errors.New("gRPC requires a ResponseWriter supporting http.Flusher") - } - if _, ok := w.(http.CloseNotifier); !ok { - return nil, errors.New("gRPC requires a ResponseWriter supporting http.CloseNotifier") - } - - st := &serverHandlerTransport{ - rw: w, - req: r, - closedCh: make(chan struct{}), - writes: make(chan func()), - } - - if v := r.Header.Get("grpc-timeout"); v != "" { - to, err := decodeTimeout(v) - if err != nil { - return nil, StreamErrorf(codes.Internal, "malformed time-out: %v", err) - } - st.timeoutSet = true - st.timeout = to - } - - var metakv []string - if r.Host != "" { - metakv = append(metakv, ":authority", r.Host) - } - for k, vv := range r.Header { - k = strings.ToLower(k) - if isReservedHeader(k) && !isWhitelistedPseudoHeader(k) { - continue - } - for _, v := range vv { - if k == "user-agent" { - // user-agent is special. Copying logic of http_util.go. - if i := strings.LastIndex(v, " "); i == -1 { - // There is no application user agent string being set - continue - } else { - v = v[:i] - } - } - metakv = append(metakv, k, v) - } - } - st.headerMD = metadata.Pairs(metakv...) - - return st, nil -} - -// serverHandlerTransport is an implementation of ServerTransport -// which replies to exactly one gRPC request (exactly one HTTP request), -// using the net/http.Handler interface. This http.Handler is guaranteed -// at this point to be speaking over HTTP/2, so it's able to speak valid -// gRPC. -type serverHandlerTransport struct { - rw http.ResponseWriter - req *http.Request - timeoutSet bool - timeout time.Duration - didCommonHeaders bool - - headerMD metadata.MD - - closeOnce sync.Once - closedCh chan struct{} // closed on Close - - // writes is a channel of code to run serialized in the - // ServeHTTP (HandleStreams) goroutine. 
The channel is closed - // when WriteStatus is called. - writes chan func() -} - -func (ht *serverHandlerTransport) Close() error { - ht.closeOnce.Do(ht.closeCloseChanOnce) - return nil -} - -func (ht *serverHandlerTransport) closeCloseChanOnce() { close(ht.closedCh) } - -func (ht *serverHandlerTransport) RemoteAddr() net.Addr { return strAddr(ht.req.RemoteAddr) } - -// strAddr is a net.Addr backed by either a TCP "ip:port" string, or -// the empty string if unknown. -type strAddr string - -func (a strAddr) Network() string { - if a != "" { - // Per the documentation on net/http.Request.RemoteAddr, if this is - // set, it's set to the IP:port of the peer (hence, TCP): - // https://golang.org/pkg/net/http/#Request - // - // If we want to support Unix sockets later, we can - // add our own grpc-specific convention within the - // grpc codebase to set RemoteAddr to a different - // format, or probably better: we can attach it to the - // context and use that from serverHandlerTransport.RemoteAddr. - return "tcp" - } - return "" -} - -func (a strAddr) String() string { return string(a) } - -// do runs fn in the ServeHTTP goroutine. -func (ht *serverHandlerTransport) do(fn func()) error { - select { - case ht.writes <- fn: - return nil - case <-ht.closedCh: - return ErrConnClosing - } -} - -func (ht *serverHandlerTransport) WriteStatus(s *Stream, statusCode codes.Code, statusDesc string) error { - err := ht.do(func() { - ht.writeCommonHeaders(s) - - // And flush, in case no header or body has been sent yet. - // This forces a separation of headers and trailers if this is the - // first call (for example, in end2end tests's TestNoService). - ht.rw.(http.Flusher).Flush() - - h := ht.rw.Header() - h.Set("Grpc-Status", fmt.Sprintf("%d", statusCode)) - if statusDesc != "" { - h.Set("Grpc-Message", encodeGrpcMessage(statusDesc)) - } - if md := s.Trailer(); len(md) > 0 { - for k, vv := range md { - // Clients don't tolerate reading restricted headers after some non restricted ones were sent. - if isReservedHeader(k) { - continue - } - for _, v := range vv { - // http2 ResponseWriter mechanism to - // send undeclared Trailers after the - // headers have possibly been written. - h.Add(http2.TrailerPrefix+k, v) - } - } - } - }) - close(ht.writes) - return err -} - -// writeCommonHeaders sets common headers on the first write -// call (Write, WriteHeader, or WriteStatus). -func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) { - if ht.didCommonHeaders { - return - } - ht.didCommonHeaders = true - - h := ht.rw.Header() - h["Date"] = nil // suppress Date to make tests happy; TODO: restore - h.Set("Content-Type", "application/grpc") - - // Predeclare trailers we'll set later in WriteStatus (after the body). - // This is a SHOULD in the HTTP RFC, and the way you add (known) - // Trailers per the net/http.ResponseWriter contract. 
- // See https://golang.org/pkg/net/http/#ResponseWriter - // and https://golang.org/pkg/net/http/#example_ResponseWriter_trailers - h.Add("Trailer", "Grpc-Status") - h.Add("Trailer", "Grpc-Message") - - if s.sendCompress != "" { - h.Set("Grpc-Encoding", s.sendCompress) - } -} - -func (ht *serverHandlerTransport) Write(s *Stream, data []byte, opts *Options) error { - return ht.do(func() { - ht.writeCommonHeaders(s) - ht.rw.Write(data) - if !opts.Delay { - ht.rw.(http.Flusher).Flush() - } - }) -} - -func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error { - return ht.do(func() { - ht.writeCommonHeaders(s) - h := ht.rw.Header() - for k, vv := range md { - // Clients don't tolerate reading restricted headers after some non restricted ones were sent. - if isReservedHeader(k) { - continue - } - for _, v := range vv { - h.Add(k, v) - } - } - ht.rw.WriteHeader(200) - ht.rw.(http.Flusher).Flush() - }) -} - -func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream)) { - // With this transport type there will be exactly 1 stream: this HTTP request. - - var ctx context.Context - var cancel context.CancelFunc - if ht.timeoutSet { - ctx, cancel = context.WithTimeout(context.Background(), ht.timeout) - } else { - ctx, cancel = context.WithCancel(context.Background()) - } - - // requestOver is closed when either the request's context is done - // or the status has been written via WriteStatus. - requestOver := make(chan struct{}) - - // clientGone receives a single value if peer is gone, either - // because the underlying connection is dead or because the - // peer sends an http2 RST_STREAM. - clientGone := ht.rw.(http.CloseNotifier).CloseNotify() - go func() { - select { - case <-requestOver: - return - case <-ht.closedCh: - case <-clientGone: - } - cancel() - }() - - req := ht.req - - s := &Stream{ - id: 0, // irrelevant - windowHandler: func(int) {}, // nothing - cancel: cancel, - buf: newRecvBuffer(), - st: ht, - method: req.URL.Path, - recvCompress: req.Header.Get("grpc-encoding"), - } - pr := &peer.Peer{ - Addr: ht.RemoteAddr(), - } - if req.TLS != nil { - pr.AuthInfo = credentials.TLSInfo{State: *req.TLS} - } - ctx = metadata.NewContext(ctx, ht.headerMD) - ctx = peer.NewContext(ctx, pr) - s.ctx = newContextWithStream(ctx, s) - s.dec = &recvBufferReader{ctx: s.ctx, recv: s.buf} - - // readerDone is closed when the Body.Read-ing goroutine exits. - readerDone := make(chan struct{}) - go func() { - defer close(readerDone) - - // TODO: minimize garbage, optimize recvBuffer code/ownership - const readSize = 8196 - for buf := make([]byte, readSize); ; { - n, err := req.Body.Read(buf) - if n > 0 { - s.buf.put(&recvMsg{data: buf[:n:n]}) - buf = buf[n:] - } - if err != nil { - s.buf.put(&recvMsg{err: mapRecvMsgError(err)}) - return - } - if len(buf) == 0 { - buf = make([]byte, readSize) - } - } - }() - - // startStream is provided by the *grpc.Server's serveStreams. - // It starts a goroutine serving s and exits immediately. - // The goroutine that is started is the one that then calls - // into ht, calling WriteHeader, Write, WriteStatus, Close, etc. - startStream(s) - - ht.runStream() - close(requestOver) - - // Wait for reading goroutine to finish. 
- req.Body.Close() - <-readerDone -} - -func (ht *serverHandlerTransport) runStream() { - for { - select { - case fn, ok := <-ht.writes: - if !ok { - return - } - fn() - case <-ht.closedCh: - return - } - } -} - -func (ht *serverHandlerTransport) Drain() { - panic("Drain() is not implemented") -} - -// mapRecvMsgError returns the non-nil err into the appropriate -// error value as expected by callers of *grpc.parser.recvMsg. -// In particular, in can only be: -// * io.EOF -// * io.ErrUnexpectedEOF -// * of type transport.ConnectionError -// * of type transport.StreamError -func mapRecvMsgError(err error) error { - if err == io.EOF || err == io.ErrUnexpectedEOF { - return err - } - if se, ok := err.(http2.StreamError); ok { - if code, ok := http2ErrConvTab[se.Code]; ok { - return StreamError{ - Code: code, - Desc: se.Error(), - } - } - } - return ConnectionErrorf(true, err, err.Error()) -} diff --git a/vendor/google.golang.org/grpc/transport/http2_client.go b/vendor/google.golang.org/grpc/transport/http2_client.go deleted file mode 100644 index afbba45467..0000000000 --- a/vendor/google.golang.org/grpc/transport/http2_client.go +++ /dev/null @@ -1,1056 +0,0 @@ -/* - * - * Copyright 2014, Google Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -package transport - -import ( - "bytes" - "fmt" - "io" - "math" - "net" - "strings" - "sync" - "time" - - "golang.org/x/net/context" - "golang.org/x/net/http2" - "golang.org/x/net/http2/hpack" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/peer" -) - -// http2Client implements the ClientTransport interface with HTTP2. -type http2Client struct { - target string // server name/addr - userAgent string - conn net.Conn // underlying communication channel - authInfo credentials.AuthInfo // auth info about the connection - nextID uint32 // the next stream ID to be used - - // writableChan synchronizes write access to the transport. 
- // A writer acquires the write lock by sending a value on writableChan - // and releases it by receiving from writableChan. - writableChan chan int - // shutdownChan is closed when Close is called. - // Blocking operations should select on shutdownChan to avoid - // blocking forever after Close. - // TODO(zhaoq): Maybe have a channel context? - shutdownChan chan struct{} - // errorChan is closed to notify the I/O error to the caller. - errorChan chan struct{} - // goAway is closed to notify the upper layer (i.e., addrConn.transportMonitor) - // that the server sent GoAway on this transport. - goAway chan struct{} - - framer *framer - hBuf *bytes.Buffer // the buffer for HPACK encoding - hEnc *hpack.Encoder // HPACK encoder - - // controlBuf delivers all the control related tasks (e.g., window - // updates, reset streams, and various settings) to the controller. - controlBuf *recvBuffer - fc *inFlow - // sendQuotaPool provides flow control to outbound message. - sendQuotaPool *quotaPool - // streamsQuota limits the max number of concurrent streams. - streamsQuota *quotaPool - - // The scheme used: https if TLS is on, http otherwise. - scheme string - - creds []credentials.PerRPCCredentials - - mu sync.Mutex // guard the following variables - state transportState // the state of underlying connection - activeStreams map[uint32]*Stream - // The max number of concurrent streams - maxStreams int - // the per-stream outbound flow control window size set by the peer. - streamSendQuota uint32 - // goAwayID records the Last-Stream-ID in the GoAway frame from the server. - goAwayID uint32 - // prevGoAway ID records the Last-Stream-ID in the previous GOAway frame. - prevGoAwayID uint32 -} - -func dial(fn func(context.Context, string) (net.Conn, error), ctx context.Context, addr string) (net.Conn, error) { - if fn != nil { - return fn(ctx, addr) - } - return dialContext(ctx, "tcp", addr) -} - -func isTemporary(err error) bool { - switch err { - case io.EOF: - // Connection closures may be resolved upon retry, and are thus - // treated as temporary. - return true - case context.DeadlineExceeded: - // In Go 1.7, context.DeadlineExceeded implements Timeout(), and this - // special case is not needed. Until then, we need to keep this - // clause. - return true - } - - switch err := err.(type) { - case interface { - Temporary() bool - }: - return err.Temporary() - case interface { - Timeout() bool - }: - // Timeouts may be resolved upon retry, and are thus treated as - // temporary. - return err.Timeout() - } - return false -} - -// newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2 -// and starts to receive messages on it. Non-nil error returns if construction -// fails. -func newHTTP2Client(ctx context.Context, addr string, opts ConnectOptions) (_ ClientTransport, err error) { - scheme := "http" - conn, err := dial(opts.Dialer, ctx, addr) - if err != nil { - return nil, ConnectionErrorf(true, err, "transport: %v", err) - } - // Any further errors will close the underlying connection - defer func(conn net.Conn) { - if err != nil { - conn.Close() - } - }(conn) - var authInfo credentials.AuthInfo - if creds := opts.TransportCredentials; creds != nil { - scheme = "https" - conn, authInfo, err = creds.ClientHandshake(ctx, addr, conn) - if err != nil { - // Credentials handshake errors are typically considered permanent - // to avoid retrying on e.g. bad certificates. 
- temp := isTemporary(err) - return nil, ConnectionErrorf(temp, err, "transport: %v", err) - } - } - ua := primaryUA - if opts.UserAgent != "" { - ua = opts.UserAgent + " " + ua - } - var buf bytes.Buffer - t := &http2Client{ - target: addr, - userAgent: ua, - conn: conn, - authInfo: authInfo, - // The client initiated stream id is odd starting from 1. - nextID: 1, - writableChan: make(chan int, 1), - shutdownChan: make(chan struct{}), - errorChan: make(chan struct{}), - goAway: make(chan struct{}), - framer: newFramer(conn), - hBuf: &buf, - hEnc: hpack.NewEncoder(&buf), - controlBuf: newRecvBuffer(), - fc: &inFlow{limit: initialConnWindowSize}, - sendQuotaPool: newQuotaPool(defaultWindowSize), - scheme: scheme, - state: reachable, - activeStreams: make(map[uint32]*Stream), - creds: opts.PerRPCCredentials, - maxStreams: math.MaxInt32, - streamSendQuota: defaultWindowSize, - } - // Start the reader goroutine for incoming message. Each transport has - // a dedicated goroutine which reads HTTP2 frame from network. Then it - // dispatches the frame to the corresponding stream entity. - go t.reader() - // Send connection preface to server. - n, err := t.conn.Write(clientPreface) - if err != nil { - t.Close() - return nil, ConnectionErrorf(true, err, "transport: %v", err) - } - if n != len(clientPreface) { - t.Close() - return nil, ConnectionErrorf(true, err, "transport: preface mismatch, wrote %d bytes; want %d", n, len(clientPreface)) - } - if initialWindowSize != defaultWindowSize { - err = t.framer.writeSettings(true, http2.Setting{ - ID: http2.SettingInitialWindowSize, - Val: uint32(initialWindowSize), - }) - } else { - err = t.framer.writeSettings(true) - } - if err != nil { - t.Close() - return nil, ConnectionErrorf(true, err, "transport: %v", err) - } - // Adjust the connection flow control window if needed. - if delta := uint32(initialConnWindowSize - defaultWindowSize); delta > 0 { - if err := t.framer.writeWindowUpdate(true, 0, delta); err != nil { - t.Close() - return nil, ConnectionErrorf(true, err, "transport: %v", err) - } - } - go t.controller() - t.writableChan <- 0 - return t, nil -} - -func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream { - // TODO(zhaoq): Handle uint32 overflow of Stream.id. - s := &Stream{ - id: t.nextID, - done: make(chan struct{}), - goAway: make(chan struct{}), - method: callHdr.Method, - sendCompress: callHdr.SendCompress, - buf: newRecvBuffer(), - fc: &inFlow{limit: initialWindowSize}, - sendQuotaPool: newQuotaPool(int(t.streamSendQuota)), - headerChan: make(chan struct{}), - } - t.nextID += 2 - s.windowHandler = func(n int) { - t.updateWindow(s, uint32(n)) - } - // Make a stream be able to cancel the pending operations by itself. - s.ctx, s.cancel = context.WithCancel(ctx) - s.dec = &recvBufferReader{ - ctx: s.ctx, - goAway: s.goAway, - recv: s.buf, - } - return s -} - -// NewStream creates a stream and register it into the transport as "active" -// streams. -func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Stream, err error) { - // Record the timeout value on the context. - var timeout time.Duration - if dl, ok := ctx.Deadline(); ok { - timeout = dl.Sub(time.Now()) - } - select { - case <-ctx.Done(): - return nil, ContextErr(ctx.Err()) - default: - } - pr := &peer.Peer{ - Addr: t.conn.RemoteAddr(), - } - // Attach Auth info if there is any. 
- if t.authInfo != nil { - pr.AuthInfo = t.authInfo - } - ctx = peer.NewContext(ctx, pr) - authData := make(map[string]string) - for _, c := range t.creds { - // Construct URI required to get auth request metadata. - var port string - if pos := strings.LastIndex(t.target, ":"); pos != -1 { - // Omit port if it is the default one. - if t.target[pos+1:] != "443" { - port = ":" + t.target[pos+1:] - } - } - pos := strings.LastIndex(callHdr.Method, "/") - if pos == -1 { - return nil, StreamErrorf(codes.InvalidArgument, "transport: malformed method name: %q", callHdr.Method) - } - audience := "https://" + callHdr.Host + port + callHdr.Method[:pos] - data, err := c.GetRequestMetadata(ctx, audience) - if err != nil { - return nil, StreamErrorf(codes.InvalidArgument, "transport: %v", err) - } - for k, v := range data { - authData[k] = v - } - } - t.mu.Lock() - if t.activeStreams == nil { - t.mu.Unlock() - return nil, ErrConnClosing - } - if t.state == draining { - t.mu.Unlock() - return nil, ErrStreamDrain - } - if t.state != reachable { - t.mu.Unlock() - return nil, ErrConnClosing - } - checkStreamsQuota := t.streamsQuota != nil - t.mu.Unlock() - if checkStreamsQuota { - sq, err := wait(ctx, nil, nil, t.shutdownChan, t.streamsQuota.acquire()) - if err != nil { - return nil, err - } - // Returns the quota balance back. - if sq > 1 { - t.streamsQuota.add(sq - 1) - } - } - if _, err := wait(ctx, nil, nil, t.shutdownChan, t.writableChan); err != nil { - // Return the quota back now because there is no stream returned to the caller. - if _, ok := err.(StreamError); ok && checkStreamsQuota { - t.streamsQuota.add(1) - } - return nil, err - } - t.mu.Lock() - if t.state == draining { - t.mu.Unlock() - if checkStreamsQuota { - t.streamsQuota.add(1) - } - // Need to make t writable again so that the rpc in flight can still proceed. - t.writableChan <- 0 - return nil, ErrStreamDrain - } - if t.state != reachable { - t.mu.Unlock() - return nil, ErrConnClosing - } - s := t.newStream(ctx, callHdr) - t.activeStreams[s.id] = s - - // This stream is not counted when applySetings(...) initialize t.streamsQuota. - // Reset t.streamsQuota to the right value. - var reset bool - if !checkStreamsQuota && t.streamsQuota != nil { - reset = true - } - t.mu.Unlock() - if reset { - t.streamsQuota.reset(-1) - } - - // HPACK encodes various headers. Note that once WriteField(...) is - // called, the corresponding headers/continuation frame has to be sent - // because hpack.Encoder is stateful. - t.hBuf.Reset() - t.hEnc.WriteField(hpack.HeaderField{Name: ":method", Value: "POST"}) - t.hEnc.WriteField(hpack.HeaderField{Name: ":scheme", Value: t.scheme}) - t.hEnc.WriteField(hpack.HeaderField{Name: ":path", Value: callHdr.Method}) - t.hEnc.WriteField(hpack.HeaderField{Name: ":authority", Value: callHdr.Host}) - t.hEnc.WriteField(hpack.HeaderField{Name: "content-type", Value: "application/grpc"}) - t.hEnc.WriteField(hpack.HeaderField{Name: "user-agent", Value: t.userAgent}) - t.hEnc.WriteField(hpack.HeaderField{Name: "te", Value: "trailers"}) - - if callHdr.SendCompress != "" { - t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-encoding", Value: callHdr.SendCompress}) - } - if timeout > 0 { - t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-timeout", Value: encodeTimeout(timeout)}) - } - for k, v := range authData { - // Capital header names are illegal in HTTP/2. 
- k = strings.ToLower(k) - t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: v}) - } - var ( - hasMD bool - endHeaders bool - ) - if md, ok := metadata.FromContext(ctx); ok { - hasMD = true - for k, v := range md { - // HTTP doesn't allow you to set pseudoheaders after non pseudoheaders were set. - if isReservedHeader(k) { - continue - } - for _, entry := range v { - t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: entry}) - } - } - } - first := true - // Sends the headers in a single batch even when they span multiple frames. - for !endHeaders { - size := t.hBuf.Len() - if size > http2MaxFrameLen { - size = http2MaxFrameLen - } else { - endHeaders = true - } - var flush bool - if endHeaders && (hasMD || callHdr.Flush) { - flush = true - } - if first { - // Sends a HeadersFrame to server to start a new stream. - p := http2.HeadersFrameParam{ - StreamID: s.id, - BlockFragment: t.hBuf.Next(size), - EndStream: false, - EndHeaders: endHeaders, - } - // Do a force flush for the buffered frames iff it is the last headers frame - // and there is header metadata to be sent. Otherwise, there is flushing until - // the corresponding data frame is written. - err = t.framer.writeHeaders(flush, p) - first = false - } else { - // Sends Continuation frames for the leftover headers. - err = t.framer.writeContinuation(flush, s.id, endHeaders, t.hBuf.Next(size)) - } - if err != nil { - t.notifyError(err) - return nil, ConnectionErrorf(true, err, "transport: %v", err) - } - } - t.writableChan <- 0 - return s, nil -} - -// CloseStream clears the footprint of a stream when the stream is not needed any more. -// This must not be executed in reader's goroutine. -func (t *http2Client) CloseStream(s *Stream, err error) { - var updateStreams bool - t.mu.Lock() - if t.activeStreams == nil { - t.mu.Unlock() - return - } - if t.streamsQuota != nil { - updateStreams = true - } - delete(t.activeStreams, s.id) - if t.state == draining && len(t.activeStreams) == 0 { - // The transport is draining and s is the last live stream on t. - t.mu.Unlock() - t.Close() - return - } - t.mu.Unlock() - if updateStreams { - t.streamsQuota.add(1) - } - s.mu.Lock() - if q := s.fc.resetPendingData(); q > 0 { - if n := t.fc.onRead(q); n > 0 { - t.controlBuf.put(&windowUpdate{0, n}) - } - } - if s.state == streamDone { - s.mu.Unlock() - return - } - if !s.headerDone { - close(s.headerChan) - s.headerDone = true - } - s.state = streamDone - s.mu.Unlock() - if _, ok := err.(StreamError); ok { - t.controlBuf.put(&resetStream{s.id, http2.ErrCodeCancel}) - } -} - -// Close kicks off the shutdown process of the transport. This should be called -// only once on a transport. Once it is called, the transport should not be -// accessed any more. -func (t *http2Client) Close() (err error) { - t.mu.Lock() - if t.state == closing { - t.mu.Unlock() - return - } - if t.state == reachable || t.state == draining { - close(t.errorChan) - } - t.state = closing - t.mu.Unlock() - close(t.shutdownChan) - err = t.conn.Close() - t.mu.Lock() - streams := t.activeStreams - t.activeStreams = nil - t.mu.Unlock() - // Notify all active streams. - for _, s := range streams { - s.mu.Lock() - if !s.headerDone { - close(s.headerChan) - s.headerDone = true - } - s.mu.Unlock() - s.write(recvMsg{err: ErrConnClosing}) - } - return -} - -func (t *http2Client) GracefulClose() error { - t.mu.Lock() - switch t.state { - case unreachable: - // The server may close the connection concurrently. t is not available for - // any streams. Close it now. 
- t.mu.Unlock() - t.Close() - return nil - case closing: - t.mu.Unlock() - return nil - } - // Notify the streams which were initiated after the server sent GOAWAY. - select { - case <-t.goAway: - n := t.prevGoAwayID - if n == 0 && t.nextID > 1 { - n = t.nextID - 2 - } - m := t.goAwayID + 2 - if m == 2 { - m = 1 - } - for i := m; i <= n; i += 2 { - if s, ok := t.activeStreams[i]; ok { - close(s.goAway) - } - } - default: - } - if t.state == draining { - t.mu.Unlock() - return nil - } - t.state = draining - active := len(t.activeStreams) - t.mu.Unlock() - if active == 0 { - return t.Close() - } - return nil -} - -// Write formats the data into HTTP2 data frame(s) and sends it out. The caller -// should proceed only if Write returns nil. -// TODO(zhaoq): opts.Delay is ignored in this implementation. Support it later -// if it improves the performance. -func (t *http2Client) Write(s *Stream, data []byte, opts *Options) error { - r := bytes.NewBuffer(data) - for { - var p []byte - if r.Len() > 0 { - size := http2MaxFrameLen - s.sendQuotaPool.add(0) - // Wait until the stream has some quota to send the data. - sq, err := wait(s.ctx, s.done, s.goAway, t.shutdownChan, s.sendQuotaPool.acquire()) - if err != nil { - return err - } - t.sendQuotaPool.add(0) - // Wait until the transport has some quota to send the data. - tq, err := wait(s.ctx, s.done, s.goAway, t.shutdownChan, t.sendQuotaPool.acquire()) - if err != nil { - if _, ok := err.(StreamError); ok || err == io.EOF { - t.sendQuotaPool.cancel() - } - return err - } - if sq < size { - size = sq - } - if tq < size { - size = tq - } - p = r.Next(size) - ps := len(p) - if ps < sq { - // Overbooked stream quota. Return it back. - s.sendQuotaPool.add(sq - ps) - } - if ps < tq { - // Overbooked transport quota. Return it back. - t.sendQuotaPool.add(tq - ps) - } - } - var ( - endStream bool - forceFlush bool - ) - if opts.Last && r.Len() == 0 { - endStream = true - } - // Indicate there is a writer who is about to write a data frame. - t.framer.adjustNumWriters(1) - // Got some quota. Try to acquire writing privilege on the transport. - if _, err := wait(s.ctx, s.done, s.goAway, t.shutdownChan, t.writableChan); err != nil { - if _, ok := err.(StreamError); ok || err == io.EOF { - // Return the connection quota back. - t.sendQuotaPool.add(len(p)) - } - if t.framer.adjustNumWriters(-1) == 0 { - // This writer is the last one in this batch and has the - // responsibility to flush the buffered frames. It queues - // a flush request to controlBuf instead of flushing directly - // in order to avoid the race with other writing or flushing. - t.controlBuf.put(&flushIO{}) - } - return err - } - select { - case <-s.ctx.Done(): - t.sendQuotaPool.add(len(p)) - if t.framer.adjustNumWriters(-1) == 0 { - t.controlBuf.put(&flushIO{}) - } - t.writableChan <- 0 - return ContextErr(s.ctx.Err()) - default: - } - if r.Len() == 0 && t.framer.adjustNumWriters(0) == 1 { - // Do a force flush iff this is last frame for the entire gRPC message - // and the caller is the only writer at this moment. - forceFlush = true - } - // If WriteData fails, all the pending streams will be handled - // by http2Client.Close(). No explicit CloseStream() needs to be - // invoked. 
- if err := t.framer.writeData(forceFlush, s.id, endStream, p); err != nil { - t.notifyError(err) - return ConnectionErrorf(true, err, "transport: %v", err) - } - if t.framer.adjustNumWriters(-1) == 0 { - t.framer.flushWrite() - } - t.writableChan <- 0 - if r.Len() == 0 { - break - } - } - if !opts.Last { - return nil - } - s.mu.Lock() - if s.state != streamDone { - s.state = streamWriteDone - } - s.mu.Unlock() - return nil -} - -func (t *http2Client) getStream(f http2.Frame) (*Stream, bool) { - t.mu.Lock() - defer t.mu.Unlock() - s, ok := t.activeStreams[f.Header().StreamID] - return s, ok -} - -// updateWindow adjusts the inbound quota for the stream and the transport. -// Window updates will deliver to the controller for sending when -// the cumulative quota exceeds the corresponding threshold. -func (t *http2Client) updateWindow(s *Stream, n uint32) { - s.mu.Lock() - defer s.mu.Unlock() - if s.state == streamDone { - return - } - if w := t.fc.onRead(n); w > 0 { - t.controlBuf.put(&windowUpdate{0, w}) - } - if w := s.fc.onRead(n); w > 0 { - t.controlBuf.put(&windowUpdate{s.id, w}) - } -} - -func (t *http2Client) handleData(f *http2.DataFrame) { - size := len(f.Data()) - if err := t.fc.onData(uint32(size)); err != nil { - t.notifyError(ConnectionErrorf(true, err, "%v", err)) - return - } - // Select the right stream to dispatch. - s, ok := t.getStream(f) - if !ok { - if w := t.fc.onRead(uint32(size)); w > 0 { - t.controlBuf.put(&windowUpdate{0, w}) - } - return - } - if size > 0 { - s.mu.Lock() - if s.state == streamDone { - s.mu.Unlock() - // The stream has been closed. Release the corresponding quota. - if w := t.fc.onRead(uint32(size)); w > 0 { - t.controlBuf.put(&windowUpdate{0, w}) - } - return - } - if err := s.fc.onData(uint32(size)); err != nil { - s.state = streamDone - s.statusCode = codes.Internal - s.statusDesc = err.Error() - close(s.done) - s.mu.Unlock() - s.write(recvMsg{err: io.EOF}) - t.controlBuf.put(&resetStream{s.id, http2.ErrCodeFlowControl}) - return - } - s.mu.Unlock() - // TODO(bradfitz, zhaoq): A copy is required here because there is no - // guarantee f.Data() is consumed before the arrival of next frame. - // Can this copy be eliminated? - data := make([]byte, size) - copy(data, f.Data()) - s.write(recvMsg{data: data}) - } - // The server has closed the stream without sending trailers. Record that - // the read direction is closed, and set the status appropriately. 
- if f.FrameHeader.Flags.Has(http2.FlagDataEndStream) { - s.mu.Lock() - if s.state == streamDone { - s.mu.Unlock() - return - } - s.state = streamDone - s.statusCode = codes.Internal - s.statusDesc = "server closed the stream without sending trailers" - close(s.done) - s.mu.Unlock() - s.write(recvMsg{err: io.EOF}) - } -} - -func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) { - s, ok := t.getStream(f) - if !ok { - return - } - s.mu.Lock() - if s.state == streamDone { - s.mu.Unlock() - return - } - s.state = streamDone - if !s.headerDone { - close(s.headerChan) - s.headerDone = true - } - s.statusCode, ok = http2ErrConvTab[http2.ErrCode(f.ErrCode)] - if !ok { - grpclog.Println("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error ", f.ErrCode) - s.statusCode = codes.Unknown - } - s.statusDesc = fmt.Sprintf("stream terminated by RST_STREAM with error code: %d", f.ErrCode) - close(s.done) - s.mu.Unlock() - s.write(recvMsg{err: io.EOF}) -} - -func (t *http2Client) handleSettings(f *http2.SettingsFrame) { - if f.IsAck() { - return - } - var ss []http2.Setting - f.ForeachSetting(func(s http2.Setting) error { - ss = append(ss, s) - return nil - }) - // The settings will be applied once the ack is sent. - t.controlBuf.put(&settings{ack: true, ss: ss}) -} - -func (t *http2Client) handlePing(f *http2.PingFrame) { - pingAck := &ping{ack: true} - copy(pingAck.data[:], f.Data[:]) - t.controlBuf.put(pingAck) -} - -func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { - t.mu.Lock() - if t.state == reachable || t.state == draining { - if f.LastStreamID > 0 && f.LastStreamID%2 != 1 { - t.mu.Unlock() - t.notifyError(ConnectionErrorf(true, nil, "received illegal http2 GOAWAY frame: stream ID %d is even", f.LastStreamID)) - return - } - select { - case <-t.goAway: - id := t.goAwayID - // t.goAway has been closed (i.e.,multiple GoAways). - if id < f.LastStreamID { - t.mu.Unlock() - t.notifyError(ConnectionErrorf(true, nil, "received illegal http2 GOAWAY frame: previously recv GOAWAY frame with LastStramID %d, currently recv %d", id, f.LastStreamID)) - return - } - t.prevGoAwayID = id - t.goAwayID = f.LastStreamID - t.mu.Unlock() - return - default: - } - t.goAwayID = f.LastStreamID - close(t.goAway) - } - t.mu.Unlock() -} - -func (t *http2Client) handleWindowUpdate(f *http2.WindowUpdateFrame) { - id := f.Header().StreamID - incr := f.Increment - if id == 0 { - t.sendQuotaPool.add(int(incr)) - return - } - if s, ok := t.getStream(f); ok { - s.sendQuotaPool.add(int(incr)) - } -} - -// operateHeaders takes action on the decoded headers. -func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { - s, ok := t.getStream(frame) - if !ok { - return - } - var state decodeState - for _, hf := range frame.Fields { - state.processHeaderField(hf) - } - if state.err != nil { - s.write(recvMsg{err: state.err}) - // Something wrong. Stops reading even when there is remaining. 
- return - } - - endStream := frame.StreamEnded() - - s.mu.Lock() - if !endStream { - s.recvCompress = state.encoding - } - if !s.headerDone { - if !endStream && len(state.mdata) > 0 { - s.header = state.mdata - } - close(s.headerChan) - s.headerDone = true - } - if !endStream || s.state == streamDone { - s.mu.Unlock() - return - } - - if len(state.mdata) > 0 { - s.trailer = state.mdata - } - s.statusCode = state.statusCode - s.statusDesc = state.statusDesc - close(s.done) - s.state = streamDone - s.mu.Unlock() - s.write(recvMsg{err: io.EOF}) -} - -func handleMalformedHTTP2(s *Stream, err error) { - s.mu.Lock() - if !s.headerDone { - close(s.headerChan) - s.headerDone = true - } - s.mu.Unlock() - s.write(recvMsg{err: err}) -} - -// reader runs as a separate goroutine in charge of reading data from network -// connection. -// -// TODO(zhaoq): currently one reader per transport. Investigate whether this is -// optimal. -// TODO(zhaoq): Check the validity of the incoming frame sequence. -func (t *http2Client) reader() { - // Check the validity of server preface. - frame, err := t.framer.readFrame() - if err != nil { - t.notifyError(err) - return - } - sf, ok := frame.(*http2.SettingsFrame) - if !ok { - t.notifyError(err) - return - } - t.handleSettings(sf) - - // loop to keep reading incoming messages on this transport. - for { - frame, err := t.framer.readFrame() - if err != nil { - // Abort an active stream if the http2.Framer returns a - // http2.StreamError. This can happen only if the server's response - // is malformed http2. - if se, ok := err.(http2.StreamError); ok { - t.mu.Lock() - s := t.activeStreams[se.StreamID] - t.mu.Unlock() - if s != nil { - // use error detail to provide better err message - handleMalformedHTTP2(s, StreamErrorf(http2ErrConvTab[se.Code], "%v", t.framer.errorDetail())) - } - continue - } else { - // Transport error. - t.notifyError(err) - return - } - } - switch frame := frame.(type) { - case *http2.MetaHeadersFrame: - t.operateHeaders(frame) - case *http2.DataFrame: - t.handleData(frame) - case *http2.RSTStreamFrame: - t.handleRSTStream(frame) - case *http2.SettingsFrame: - t.handleSettings(frame) - case *http2.PingFrame: - t.handlePing(frame) - case *http2.GoAwayFrame: - t.handleGoAway(frame) - case *http2.WindowUpdateFrame: - t.handleWindowUpdate(frame) - default: - grpclog.Printf("transport: http2Client.reader got unhandled frame type %v.", frame) - } - } -} - -func (t *http2Client) applySettings(ss []http2.Setting) { - for _, s := range ss { - switch s.ID { - case http2.SettingMaxConcurrentStreams: - // TODO(zhaoq): This is a hack to avoid significant refactoring of the - // code to deal with the unrealistic int32 overflow. Probably will try - // to find a better way to handle this later. - if s.Val > math.MaxInt32 { - s.Val = math.MaxInt32 - } - t.mu.Lock() - reset := t.streamsQuota != nil - if !reset { - t.streamsQuota = newQuotaPool(int(s.Val) - len(t.activeStreams)) - } - ms := t.maxStreams - t.maxStreams = int(s.Val) - t.mu.Unlock() - if reset { - t.streamsQuota.reset(int(s.Val) - ms) - } - case http2.SettingInitialWindowSize: - t.mu.Lock() - for _, stream := range t.activeStreams { - // Adjust the sending quota for each stream. - stream.sendQuotaPool.reset(int(s.Val - t.streamSendQuota)) - } - t.streamSendQuota = s.Val - t.mu.Unlock() - } - } -} - -// controller running in a separate goroutine takes charge of sending control -// frames (e.g., window update, reset stream, setting, etc.) to the server. 
-func (t *http2Client) controller() { - for { - select { - case i := <-t.controlBuf.get(): - t.controlBuf.load() - select { - case <-t.writableChan: - switch i := i.(type) { - case *windowUpdate: - t.framer.writeWindowUpdate(true, i.streamID, i.increment) - case *settings: - if i.ack { - t.framer.writeSettingsAck(true) - t.applySettings(i.ss) - } else { - t.framer.writeSettings(true, i.ss...) - } - case *resetStream: - t.framer.writeRSTStream(true, i.streamID, i.code) - case *flushIO: - t.framer.flushWrite() - case *ping: - t.framer.writePing(true, i.ack, i.data) - default: - grpclog.Printf("transport: http2Client.controller got unexpected item type %v\n", i) - } - t.writableChan <- 0 - continue - case <-t.shutdownChan: - return - } - case <-t.shutdownChan: - return - } - } -} - -func (t *http2Client) Error() <-chan struct{} { - return t.errorChan -} - -func (t *http2Client) GoAway() <-chan struct{} { - return t.goAway -} - -func (t *http2Client) notifyError(err error) { - t.mu.Lock() - // make sure t.errorChan is closed only once. - if t.state == draining { - t.mu.Unlock() - t.Close() - return - } - if t.state == reachable { - t.state = unreachable - close(t.errorChan) - grpclog.Printf("transport: http2Client.notifyError got notified that the client transport was broken %v.", err) - } - t.mu.Unlock() -} diff --git a/vendor/google.golang.org/grpc/transport/http2_server.go b/vendor/google.golang.org/grpc/transport/http2_server.go deleted file mode 100644 index 16010d55fb..0000000000 --- a/vendor/google.golang.org/grpc/transport/http2_server.go +++ /dev/null @@ -1,774 +0,0 @@ -/* - * - * Copyright 2014, Google Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- * - */ - -package transport - -import ( - "bytes" - "errors" - "io" - "math" - "net" - "strconv" - "sync" - - "golang.org/x/net/context" - "golang.org/x/net/http2" - "golang.org/x/net/http2/hpack" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/peer" -) - -// ErrIllegalHeaderWrite indicates that setting header is illegal because of -// the stream's state. -var ErrIllegalHeaderWrite = errors.New("transport: the stream is done or WriteHeader was already called") - -// http2Server implements the ServerTransport interface with HTTP2. -type http2Server struct { - conn net.Conn - maxStreamID uint32 // max stream ID ever seen - authInfo credentials.AuthInfo // auth info about the connection - // writableChan synchronizes write access to the transport. - // A writer acquires the write lock by receiving a value on writableChan - // and releases it by sending on writableChan. - writableChan chan int - // shutdownChan is closed when Close is called. - // Blocking operations should select on shutdownChan to avoid - // blocking forever after Close. - shutdownChan chan struct{} - framer *framer - hBuf *bytes.Buffer // the buffer for HPACK encoding - hEnc *hpack.Encoder // HPACK encoder - - // The max number of concurrent streams. - maxStreams uint32 - // controlBuf delivers all the control related tasks (e.g., window - // updates, reset streams, and various settings) to the controller. - controlBuf *recvBuffer - fc *inFlow - // sendQuotaPool provides flow control to outbound message. - sendQuotaPool *quotaPool - - mu sync.Mutex // guard the following - state transportState - activeStreams map[uint32]*Stream - // the per-stream outbound flow control window size set by the peer. - streamSendQuota uint32 -} - -// newHTTP2Server constructs a ServerTransport based on HTTP2. ConnectionError is -// returned if something goes wrong. -func newHTTP2Server(conn net.Conn, maxStreams uint32, authInfo credentials.AuthInfo) (_ ServerTransport, err error) { - framer := newFramer(conn) - // Send initial settings as connection preface to client. - var settings []http2.Setting - // TODO(zhaoq): Have a better way to signal "no limit" because 0 is - // permitted in the HTTP2 spec. - if maxStreams == 0 { - maxStreams = math.MaxUint32 - } else { - settings = append(settings, http2.Setting{ - ID: http2.SettingMaxConcurrentStreams, - Val: maxStreams, - }) - } - if initialWindowSize != defaultWindowSize { - settings = append(settings, http2.Setting{ - ID: http2.SettingInitialWindowSize, - Val: uint32(initialWindowSize)}) - } - if err := framer.writeSettings(true, settings...); err != nil { - return nil, ConnectionErrorf(true, err, "transport: %v", err) - } - // Adjust the connection flow control window if needed. 
- if delta := uint32(initialConnWindowSize - defaultWindowSize); delta > 0 { - if err := framer.writeWindowUpdate(true, 0, delta); err != nil { - return nil, ConnectionErrorf(true, err, "transport: %v", err) - } - } - var buf bytes.Buffer - t := &http2Server{ - conn: conn, - authInfo: authInfo, - framer: framer, - hBuf: &buf, - hEnc: hpack.NewEncoder(&buf), - maxStreams: maxStreams, - controlBuf: newRecvBuffer(), - fc: &inFlow{limit: initialConnWindowSize}, - sendQuotaPool: newQuotaPool(defaultWindowSize), - state: reachable, - writableChan: make(chan int, 1), - shutdownChan: make(chan struct{}), - activeStreams: make(map[uint32]*Stream), - streamSendQuota: defaultWindowSize, - } - go t.controller() - t.writableChan <- 0 - return t, nil -} - -// operateHeader takes action on the decoded headers. -func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream)) (close bool) { - buf := newRecvBuffer() - s := &Stream{ - id: frame.Header().StreamID, - st: t, - buf: buf, - fc: &inFlow{limit: initialWindowSize}, - } - - var state decodeState - for _, hf := range frame.Fields { - state.processHeaderField(hf) - } - if err := state.err; err != nil { - if se, ok := err.(StreamError); ok { - t.controlBuf.put(&resetStream{s.id, statusCodeConvTab[se.Code]}) - } - return - } - - if frame.StreamEnded() { - // s is just created by the caller. No lock needed. - s.state = streamReadDone - } - s.recvCompress = state.encoding - if state.timeoutSet { - s.ctx, s.cancel = context.WithTimeout(context.TODO(), state.timeout) - } else { - s.ctx, s.cancel = context.WithCancel(context.TODO()) - } - pr := &peer.Peer{ - Addr: t.conn.RemoteAddr(), - } - // Attach Auth info if there is any. - if t.authInfo != nil { - pr.AuthInfo = t.authInfo - } - s.ctx = peer.NewContext(s.ctx, pr) - // Cache the current stream to the context so that the server application - // can find out. Required when the server wants to send some metadata - // back to the client (unary call only). - s.ctx = newContextWithStream(s.ctx, s) - // Attach the received metadata to the context. - if len(state.mdata) > 0 { - s.ctx = metadata.NewContext(s.ctx, state.mdata) - } - - s.dec = &recvBufferReader{ - ctx: s.ctx, - recv: s.buf, - } - s.recvCompress = state.encoding - s.method = state.method - t.mu.Lock() - if t.state != reachable { - t.mu.Unlock() - return - } - if uint32(len(t.activeStreams)) >= t.maxStreams { - t.mu.Unlock() - t.controlBuf.put(&resetStream{s.id, http2.ErrCodeRefusedStream}) - return - } - if s.id%2 != 1 || s.id <= t.maxStreamID { - t.mu.Unlock() - // illegal gRPC stream id. - grpclog.Println("transport: http2Server.HandleStreams received an illegal stream id: ", s.id) - return true - } - t.maxStreamID = s.id - s.sendQuotaPool = newQuotaPool(int(t.streamSendQuota)) - t.activeStreams[s.id] = s - t.mu.Unlock() - s.windowHandler = func(n int) { - t.updateWindow(s, uint32(n)) - } - handle(s) - return -} - -// HandleStreams receives incoming streams using the given handler. This is -// typically run in a separate goroutine. -func (t *http2Server) HandleStreams(handle func(*Stream)) { - // Check the validity of client preface. 
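HandleStreams below starts by validating the fixed HTTP/2 client connection preface before reading any frames. A standalone sketch of that check, assuming only the exported http2.ClientPreface constant (checkClientPreface is an illustrative helper, not part of the vendored file):

package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"

	"golang.org/x/net/http2"
)

// checkClientPreface reads exactly len(ClientPreface) bytes and compares them
// byte-for-byte, as the server transport does before handling frames.
func checkClientPreface(conn io.Reader) error {
	preface := make([]byte, len(http2.ClientPreface))
	if _, err := io.ReadFull(conn, preface); err != nil {
		return fmt.Errorf("failed to read client preface: %v", err)
	}
	if !bytes.Equal(preface, []byte(http2.ClientPreface)) {
		return fmt.Errorf("bogus greeting from client: %q", preface)
	}
	return nil
}

func main() {
	// A well-behaved client begins the connection with the fixed preface.
	fmt.Println(checkClientPreface(strings.NewReader(http2.ClientPreface))) // <nil>
}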
- preface := make([]byte, len(clientPreface)) - if _, err := io.ReadFull(t.conn, preface); err != nil { - grpclog.Printf("transport: http2Server.HandleStreams failed to receive the preface from client: %v", err) - t.Close() - return - } - if !bytes.Equal(preface, clientPreface) { - grpclog.Printf("transport: http2Server.HandleStreams received bogus greeting from client: %q", preface) - t.Close() - return - } - - frame, err := t.framer.readFrame() - if err == io.EOF || err == io.ErrUnexpectedEOF { - t.Close() - return - } - if err != nil { - grpclog.Printf("transport: http2Server.HandleStreams failed to read frame: %v", err) - t.Close() - return - } - sf, ok := frame.(*http2.SettingsFrame) - if !ok { - grpclog.Printf("transport: http2Server.HandleStreams saw invalid preface type %T from client", frame) - t.Close() - return - } - t.handleSettings(sf) - - for { - frame, err := t.framer.readFrame() - if err != nil { - if se, ok := err.(http2.StreamError); ok { - t.mu.Lock() - s := t.activeStreams[se.StreamID] - t.mu.Unlock() - if s != nil { - t.closeStream(s) - } - t.controlBuf.put(&resetStream{se.StreamID, se.Code}) - continue - } - if err == io.EOF || err == io.ErrUnexpectedEOF { - t.Close() - return - } - grpclog.Printf("transport: http2Server.HandleStreams failed to read frame: %v", err) - t.Close() - return - } - switch frame := frame.(type) { - case *http2.MetaHeadersFrame: - if t.operateHeaders(frame, handle) { - t.Close() - break - } - case *http2.DataFrame: - t.handleData(frame) - case *http2.RSTStreamFrame: - t.handleRSTStream(frame) - case *http2.SettingsFrame: - t.handleSettings(frame) - case *http2.PingFrame: - t.handlePing(frame) - case *http2.WindowUpdateFrame: - t.handleWindowUpdate(frame) - case *http2.GoAwayFrame: - // TODO: Handle GoAway from the client appropriately. - default: - grpclog.Printf("transport: http2Server.HandleStreams found unhandled frame type %v.", frame) - } - } -} - -func (t *http2Server) getStream(f http2.Frame) (*Stream, bool) { - t.mu.Lock() - defer t.mu.Unlock() - if t.activeStreams == nil { - // The transport is closing. - return nil, false - } - s, ok := t.activeStreams[f.Header().StreamID] - if !ok { - // The stream is already done. - return nil, false - } - return s, true -} - -// updateWindow adjusts the inbound quota for the stream and the transport. -// Window updates will deliver to the controller for sending when -// the cumulative quota exceeds the corresponding threshold. -func (t *http2Server) updateWindow(s *Stream, n uint32) { - s.mu.Lock() - defer s.mu.Unlock() - if s.state == streamDone { - return - } - if w := t.fc.onRead(n); w > 0 { - t.controlBuf.put(&windowUpdate{0, w}) - } - if w := s.fc.onRead(n); w > 0 { - t.controlBuf.put(&windowUpdate{s.id, w}) - } -} - -func (t *http2Server) handleData(f *http2.DataFrame) { - size := len(f.Data()) - if err := t.fc.onData(uint32(size)); err != nil { - grpclog.Printf("transport: http2Server %v", err) - t.Close() - return - } - // Select the right stream to dispatch. - s, ok := t.getStream(f) - if !ok { - if w := t.fc.onRead(uint32(size)); w > 0 { - t.controlBuf.put(&windowUpdate{0, w}) - } - return - } - if size > 0 { - s.mu.Lock() - if s.state == streamDone { - s.mu.Unlock() - // The stream has been closed. Release the corresponding quota. 
- if w := t.fc.onRead(uint32(size)); w > 0 { - t.controlBuf.put(&windowUpdate{0, w}) - } - return - } - if err := s.fc.onData(uint32(size)); err != nil { - s.mu.Unlock() - t.closeStream(s) - t.controlBuf.put(&resetStream{s.id, http2.ErrCodeFlowControl}) - return - } - s.mu.Unlock() - // TODO(bradfitz, zhaoq): A copy is required here because there is no - // guarantee f.Data() is consumed before the arrival of next frame. - // Can this copy be eliminated? - data := make([]byte, size) - copy(data, f.Data()) - s.write(recvMsg{data: data}) - } - if f.Header().Flags.Has(http2.FlagDataEndStream) { - // Received the end of stream from the client. - s.mu.Lock() - if s.state != streamDone { - s.state = streamReadDone - } - s.mu.Unlock() - s.write(recvMsg{err: io.EOF}) - } -} - -func (t *http2Server) handleRSTStream(f *http2.RSTStreamFrame) { - s, ok := t.getStream(f) - if !ok { - return - } - t.closeStream(s) -} - -func (t *http2Server) handleSettings(f *http2.SettingsFrame) { - if f.IsAck() { - return - } - var ss []http2.Setting - f.ForeachSetting(func(s http2.Setting) error { - ss = append(ss, s) - return nil - }) - // The settings will be applied once the ack is sent. - t.controlBuf.put(&settings{ack: true, ss: ss}) -} - -func (t *http2Server) handlePing(f *http2.PingFrame) { - pingAck := &ping{ack: true} - copy(pingAck.data[:], f.Data[:]) - t.controlBuf.put(pingAck) -} - -func (t *http2Server) handleWindowUpdate(f *http2.WindowUpdateFrame) { - id := f.Header().StreamID - incr := f.Increment - if id == 0 { - t.sendQuotaPool.add(int(incr)) - return - } - if s, ok := t.getStream(f); ok { - s.sendQuotaPool.add(int(incr)) - } -} - -func (t *http2Server) writeHeaders(s *Stream, b *bytes.Buffer, endStream bool) error { - first := true - endHeaders := false - var err error - // Sends the headers in a single batch. - for !endHeaders { - size := t.hBuf.Len() - if size > http2MaxFrameLen { - size = http2MaxFrameLen - } else { - endHeaders = true - } - if first { - p := http2.HeadersFrameParam{ - StreamID: s.id, - BlockFragment: b.Next(size), - EndStream: endStream, - EndHeaders: endHeaders, - } - err = t.framer.writeHeaders(endHeaders, p) - first = false - } else { - err = t.framer.writeContinuation(endHeaders, s.id, endHeaders, b.Next(size)) - } - if err != nil { - t.Close() - return ConnectionErrorf(true, err, "transport: %v", err) - } - } - return nil -} - -// WriteHeader sends the header metedata md back to the client. -func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { - s.mu.Lock() - if s.headerOk || s.state == streamDone { - s.mu.Unlock() - return ErrIllegalHeaderWrite - } - s.headerOk = true - s.mu.Unlock() - if _, err := wait(s.ctx, nil, nil, t.shutdownChan, t.writableChan); err != nil { - return err - } - t.hBuf.Reset() - t.hEnc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) - t.hEnc.WriteField(hpack.HeaderField{Name: "content-type", Value: "application/grpc"}) - if s.sendCompress != "" { - t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-encoding", Value: s.sendCompress}) - } - for k, v := range md { - if isReservedHeader(k) { - // Clients don't tolerate reading restricted headers after some non restricted ones were sent. - continue - } - for _, entry := range v { - t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: entry}) - } - } - if err := t.writeHeaders(s, t.hBuf, false); err != nil { - return err - } - t.writableChan <- 0 - return nil -} - -// WriteStatus sends stream status to the client and terminates the stream. 
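WriteHeader above and WriteStatus below both build their header blocks the same way: HPACK-encode fields into the shared hBuf and then cut the buffer into HEADERS/CONTINUATION frames. A standalone sketch of the encoding step only, using the golang.org/x/net/http2/hpack API:

package main

import (
	"bytes"
	"fmt"

	"golang.org/x/net/http2/hpack"
)

func main() {
	var buf bytes.Buffer
	enc := hpack.NewEncoder(&buf)
	// The response pseudo-header and gRPC content type written by the server
	// transport, followed by a trailer-style grpc-status field.
	enc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"})
	enc.WriteField(hpack.HeaderField{Name: "content-type", Value: "application/grpc"})
	enc.WriteField(hpack.HeaderField{Name: "grpc-status", Value: "0"})
	fmt.Printf("%d bytes of HPACK-encoded header block\n", buf.Len())
}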
-// There is no further I/O operations being able to perform on this stream. -// TODO(zhaoq): Now it indicates the end of entire stream. Revisit if early -// OK is adopted. -func (t *http2Server) WriteStatus(s *Stream, statusCode codes.Code, statusDesc string) error { - var headersSent bool - s.mu.Lock() - if s.state == streamDone { - s.mu.Unlock() - return nil - } - if s.headerOk { - headersSent = true - } - s.mu.Unlock() - if _, err := wait(s.ctx, nil, nil, t.shutdownChan, t.writableChan); err != nil { - return err - } - t.hBuf.Reset() - if !headersSent { - t.hEnc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) - t.hEnc.WriteField(hpack.HeaderField{Name: "content-type", Value: "application/grpc"}) - } - t.hEnc.WriteField( - hpack.HeaderField{ - Name: "grpc-status", - Value: strconv.Itoa(int(statusCode)), - }) - t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-message", Value: encodeGrpcMessage(statusDesc)}) - // Attach the trailer metadata. - for k, v := range s.trailer { - // Clients don't tolerate reading restricted headers after some non restricted ones were sent. - if isReservedHeader(k) { - continue - } - for _, entry := range v { - t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: entry}) - } - } - if err := t.writeHeaders(s, t.hBuf, true); err != nil { - t.Close() - return err - } - t.closeStream(s) - t.writableChan <- 0 - return nil -} - -// Write converts the data into HTTP2 data frame and sends it out. Non-nil error -// is returns if it fails (e.g., framing error, transport error). -func (t *http2Server) Write(s *Stream, data []byte, opts *Options) error { - // TODO(zhaoq): Support multi-writers for a single stream. - var writeHeaderFrame bool - s.mu.Lock() - if s.state == streamDone { - s.mu.Unlock() - return StreamErrorf(codes.Unknown, "the stream has been done") - } - if !s.headerOk { - writeHeaderFrame = true - s.headerOk = true - } - s.mu.Unlock() - if writeHeaderFrame { - if _, err := wait(s.ctx, nil, nil, t.shutdownChan, t.writableChan); err != nil { - return err - } - t.hBuf.Reset() - t.hEnc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"}) - t.hEnc.WriteField(hpack.HeaderField{Name: "content-type", Value: "application/grpc"}) - if s.sendCompress != "" { - t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-encoding", Value: s.sendCompress}) - } - p := http2.HeadersFrameParam{ - StreamID: s.id, - BlockFragment: t.hBuf.Bytes(), - EndHeaders: true, - } - if err := t.framer.writeHeaders(false, p); err != nil { - t.Close() - return ConnectionErrorf(true, err, "transport: %v", err) - } - t.writableChan <- 0 - } - r := bytes.NewBuffer(data) - for { - if r.Len() == 0 { - return nil - } - size := http2MaxFrameLen - s.sendQuotaPool.add(0) - // Wait until the stream has some quota to send the data. - sq, err := wait(s.ctx, nil, nil, t.shutdownChan, s.sendQuotaPool.acquire()) - if err != nil { - return err - } - t.sendQuotaPool.add(0) - // Wait until the transport has some quota to send the data. - tq, err := wait(s.ctx, nil, nil, t.shutdownChan, t.sendQuotaPool.acquire()) - if err != nil { - if _, ok := err.(StreamError); ok { - t.sendQuotaPool.cancel() - } - return err - } - if sq < size { - size = sq - } - if tq < size { - size = tq - } - p := r.Next(size) - ps := len(p) - if ps < sq { - // Overbooked stream quota. Return it back. - s.sendQuotaPool.add(sq - ps) - } - if ps < tq { - // Overbooked transport quota. Return it back. - t.sendQuotaPool.add(tq - ps) - } - t.framer.adjustNumWriters(1) - // Got some quota. 
Try to acquire writing privilege on the - // transport. - if _, err := wait(s.ctx, nil, nil, t.shutdownChan, t.writableChan); err != nil { - if _, ok := err.(StreamError); ok { - // Return the connection quota back. - t.sendQuotaPool.add(ps) - } - if t.framer.adjustNumWriters(-1) == 0 { - // This writer is the last one in this batch and has the - // responsibility to flush the buffered frames. It queues - // a flush request to controlBuf instead of flushing directly - // in order to avoid the race with other writing or flushing. - t.controlBuf.put(&flushIO{}) - } - return err - } - select { - case <-s.ctx.Done(): - t.sendQuotaPool.add(ps) - if t.framer.adjustNumWriters(-1) == 0 { - t.controlBuf.put(&flushIO{}) - } - t.writableChan <- 0 - return ContextErr(s.ctx.Err()) - default: - } - var forceFlush bool - if r.Len() == 0 && t.framer.adjustNumWriters(0) == 1 && !opts.Last { - forceFlush = true - } - if err := t.framer.writeData(forceFlush, s.id, false, p); err != nil { - t.Close() - return ConnectionErrorf(true, err, "transport: %v", err) - } - if t.framer.adjustNumWriters(-1) == 0 { - t.framer.flushWrite() - } - t.writableChan <- 0 - } - -} - -func (t *http2Server) applySettings(ss []http2.Setting) { - for _, s := range ss { - if s.ID == http2.SettingInitialWindowSize { - t.mu.Lock() - defer t.mu.Unlock() - for _, stream := range t.activeStreams { - stream.sendQuotaPool.reset(int(s.Val - t.streamSendQuota)) - } - t.streamSendQuota = s.Val - } - - } -} - -// controller running in a separate goroutine takes charge of sending control -// frames (e.g., window update, reset stream, setting, etc.) to the server. -func (t *http2Server) controller() { - for { - select { - case i := <-t.controlBuf.get(): - t.controlBuf.load() - select { - case <-t.writableChan: - switch i := i.(type) { - case *windowUpdate: - t.framer.writeWindowUpdate(true, i.streamID, i.increment) - case *settings: - if i.ack { - t.framer.writeSettingsAck(true) - t.applySettings(i.ss) - } else { - t.framer.writeSettings(true, i.ss...) - } - case *resetStream: - t.framer.writeRSTStream(true, i.streamID, i.code) - case *goAway: - t.mu.Lock() - if t.state == closing { - t.mu.Unlock() - // The transport is closing. - return - } - sid := t.maxStreamID - t.state = draining - t.mu.Unlock() - t.framer.writeGoAway(true, sid, http2.ErrCodeNo, nil) - case *flushIO: - t.framer.flushWrite() - case *ping: - t.framer.writePing(true, i.ack, i.data) - default: - grpclog.Printf("transport: http2Server.controller got unexpected item type %v\n", i) - } - t.writableChan <- 0 - continue - case <-t.shutdownChan: - return - } - case <-t.shutdownChan: - return - } - } -} - -// Close starts shutting down the http2Server transport. -// TODO(zhaoq): Now the destruction is not blocked on any pending streams. This -// could cause some resource issue. Revisit this later. -func (t *http2Server) Close() (err error) { - t.mu.Lock() - if t.state == closing { - t.mu.Unlock() - return errors.New("transport: Close() was already called") - } - t.state = closing - streams := t.activeStreams - t.activeStreams = nil - t.mu.Unlock() - close(t.shutdownChan) - err = t.conn.Close() - // Cancel all active streams. - for _, s := range streams { - s.cancel() - } - return -} - -// closeStream clears the footprint of a stream when the stream is not needed -// any more. 
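The Write loop above caps each DATA frame at the minimum of the HTTP/2 max frame length, the stream send quota, and the transport send quota, then hands back whatever quota the frame did not use. A standalone toy of that arithmetic (nextChunk is an illustrative name, not the vendored quotaPool API):

package main

import "fmt"

// nextChunk picks the size of the next DATA frame and reports how much
// over-acquired stream and transport quota should be returned.
func nextChunk(frameMax, streamQuota, transportQuota, pending int) (size, returnToStream, returnToTransport int) {
	size = frameMax
	if streamQuota < size {
		size = streamQuota
	}
	if transportQuota < size {
		size = transportQuota
	}
	if pending < size {
		size = pending
	}
	return size, streamQuota - size, transportQuota - size
}

func main() {
	size, rs, rt := nextChunk(16384, 1000, 600, 5000)
	fmt.Println(size, rs, rt) // 600 400 0
}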
-func (t *http2Server) closeStream(s *Stream) { - t.mu.Lock() - delete(t.activeStreams, s.id) - if t.state == draining && len(t.activeStreams) == 0 { - defer t.Close() - } - t.mu.Unlock() - // In case stream sending and receiving are invoked in separate - // goroutines (e.g., bi-directional streaming), cancel needs to be - // called to interrupt the potential blocking on other goroutines. - s.cancel() - s.mu.Lock() - if q := s.fc.resetPendingData(); q > 0 { - if w := t.fc.onRead(q); w > 0 { - t.controlBuf.put(&windowUpdate{0, w}) - } - } - if s.state == streamDone { - s.mu.Unlock() - return - } - s.state = streamDone - s.mu.Unlock() -} - -func (t *http2Server) RemoteAddr() net.Addr { - return t.conn.RemoteAddr() -} - -func (t *http2Server) Drain() { - t.controlBuf.put(&goAway{}) -} diff --git a/vendor/google.golang.org/grpc/transport/http_util.go b/vendor/google.golang.org/grpc/transport/http_util.go deleted file mode 100644 index 79da512640..0000000000 --- a/vendor/google.golang.org/grpc/transport/http_util.go +++ /dev/null @@ -1,510 +0,0 @@ -/* - * - * Copyright 2014, Google Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -package transport - -import ( - "bufio" - "bytes" - "fmt" - "io" - "net" - "strconv" - "strings" - "sync/atomic" - "time" - - "golang.org/x/net/http2" - "golang.org/x/net/http2/hpack" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/metadata" -) - -const ( - // The primary user agent - primaryUA = "grpc-go/1.0" - // http2MaxFrameLen specifies the max length of a HTTP2 frame. - http2MaxFrameLen = 16384 // 16KB frame - // http://http2.github.io/http2-spec/#SettingValues - http2InitHeaderTableSize = 4096 - // http2IOBufSize specifies the buffer size for sending frames. 
- http2IOBufSize = 32 * 1024 -) - -var ( - clientPreface = []byte(http2.ClientPreface) - http2ErrConvTab = map[http2.ErrCode]codes.Code{ - http2.ErrCodeNo: codes.Internal, - http2.ErrCodeProtocol: codes.Internal, - http2.ErrCodeInternal: codes.Internal, - http2.ErrCodeFlowControl: codes.ResourceExhausted, - http2.ErrCodeSettingsTimeout: codes.Internal, - http2.ErrCodeStreamClosed: codes.Internal, - http2.ErrCodeFrameSize: codes.Internal, - http2.ErrCodeRefusedStream: codes.Unavailable, - http2.ErrCodeCancel: codes.Canceled, - http2.ErrCodeCompression: codes.Internal, - http2.ErrCodeConnect: codes.Internal, - http2.ErrCodeEnhanceYourCalm: codes.ResourceExhausted, - http2.ErrCodeInadequateSecurity: codes.PermissionDenied, - http2.ErrCodeHTTP11Required: codes.FailedPrecondition, - } - statusCodeConvTab = map[codes.Code]http2.ErrCode{ - codes.Internal: http2.ErrCodeInternal, - codes.Canceled: http2.ErrCodeCancel, - codes.Unavailable: http2.ErrCodeRefusedStream, - codes.ResourceExhausted: http2.ErrCodeEnhanceYourCalm, - codes.PermissionDenied: http2.ErrCodeInadequateSecurity, - } -) - -// Records the states during HPACK decoding. Must be reset once the -// decoding of the entire headers are finished. -type decodeState struct { - err error // first error encountered decoding - - encoding string - // statusCode caches the stream status received from the trailer - // the server sent. Client side only. - statusCode codes.Code - statusDesc string - // Server side only fields. - timeoutSet bool - timeout time.Duration - method string - // key-value metadata map from the peer. - mdata map[string][]string -} - -// isReservedHeader checks whether hdr belongs to HTTP2 headers -// reserved by gRPC protocol. Any other headers are classified as the -// user-specified metadata. -func isReservedHeader(hdr string) bool { - if hdr != "" && hdr[0] == ':' { - return true - } - switch hdr { - case "content-type", - "grpc-message-type", - "grpc-encoding", - "grpc-message", - "grpc-status", - "grpc-timeout", - "te": - return true - default: - return false - } -} - -// isWhitelistedPseudoHeader checks whether hdr belongs to HTTP2 pseudoheaders -// that should be propagated into metadata visible to users. -func isWhitelistedPseudoHeader(hdr string) bool { - switch hdr { - case ":authority": - return true - default: - return false - } -} - -func (d *decodeState) setErr(err error) { - if d.err == nil { - d.err = err - } -} - -func validContentType(t string) bool { - e := "application/grpc" - if !strings.HasPrefix(t, e) { - return false - } - // Support variations on the content-type - // (e.g. "application/grpc+blah", "application/grpc;blah"). 
- if len(t) > len(e) && t[len(e)] != '+' && t[len(e)] != ';' { - return false - } - return true -} - -func (d *decodeState) processHeaderField(f hpack.HeaderField) { - switch f.Name { - case "content-type": - if !validContentType(f.Value) { - d.setErr(StreamErrorf(codes.FailedPrecondition, "transport: received the unexpected content-type %q", f.Value)) - return - } - case "grpc-encoding": - d.encoding = f.Value - case "grpc-status": - code, err := strconv.Atoi(f.Value) - if err != nil { - d.setErr(StreamErrorf(codes.Internal, "transport: malformed grpc-status: %v", err)) - return - } - d.statusCode = codes.Code(code) - case "grpc-message": - d.statusDesc = decodeGrpcMessage(f.Value) - case "grpc-timeout": - d.timeoutSet = true - var err error - d.timeout, err = decodeTimeout(f.Value) - if err != nil { - d.setErr(StreamErrorf(codes.Internal, "transport: malformed time-out: %v", err)) - return - } - case ":path": - d.method = f.Value - default: - if !isReservedHeader(f.Name) || isWhitelistedPseudoHeader(f.Name) { - if f.Name == "user-agent" { - i := strings.LastIndex(f.Value, " ") - if i == -1 { - // There is no application user agent string being set. - return - } - // Extract the application user agent string. - f.Value = f.Value[:i] - } - if d.mdata == nil { - d.mdata = make(map[string][]string) - } - k, v, err := metadata.DecodeKeyValue(f.Name, f.Value) - if err != nil { - grpclog.Printf("Failed to decode (%q, %q): %v", f.Name, f.Value, err) - return - } - d.mdata[k] = append(d.mdata[k], v) - } - } -} - -type timeoutUnit uint8 - -const ( - hour timeoutUnit = 'H' - minute timeoutUnit = 'M' - second timeoutUnit = 'S' - millisecond timeoutUnit = 'm' - microsecond timeoutUnit = 'u' - nanosecond timeoutUnit = 'n' -) - -func timeoutUnitToDuration(u timeoutUnit) (d time.Duration, ok bool) { - switch u { - case hour: - return time.Hour, true - case minute: - return time.Minute, true - case second: - return time.Second, true - case millisecond: - return time.Millisecond, true - case microsecond: - return time.Microsecond, true - case nanosecond: - return time.Nanosecond, true - default: - } - return -} - -const maxTimeoutValue int64 = 100000000 - 1 - -// div does integer division and round-up the result. Note that this is -// equivalent to (d+r-1)/r but has less chance to overflow. -func div(d, r time.Duration) int64 { - if m := d % r; m > 0 { - return int64(d/r + 1) - } - return int64(d / r) -} - -// TODO(zhaoq): It is the simplistic and not bandwidth efficient. Improve it. -func encodeTimeout(t time.Duration) string { - if d := div(t, time.Nanosecond); d <= maxTimeoutValue { - return strconv.FormatInt(d, 10) + "n" - } - if d := div(t, time.Microsecond); d <= maxTimeoutValue { - return strconv.FormatInt(d, 10) + "u" - } - if d := div(t, time.Millisecond); d <= maxTimeoutValue { - return strconv.FormatInt(d, 10) + "m" - } - if d := div(t, time.Second); d <= maxTimeoutValue { - return strconv.FormatInt(d, 10) + "S" - } - if d := div(t, time.Minute); d <= maxTimeoutValue { - return strconv.FormatInt(d, 10) + "M" - } - // Note that maxTimeoutValue * time.Hour > MaxInt64. 
- return strconv.FormatInt(div(t, time.Hour), 10) + "H" -} - -func decodeTimeout(s string) (time.Duration, error) { - size := len(s) - if size < 2 { - return 0, fmt.Errorf("transport: timeout string is too short: %q", s) - } - unit := timeoutUnit(s[size-1]) - d, ok := timeoutUnitToDuration(unit) - if !ok { - return 0, fmt.Errorf("transport: timeout unit is not recognized: %q", s) - } - t, err := strconv.ParseInt(s[:size-1], 10, 64) - if err != nil { - return 0, err - } - return d * time.Duration(t), nil -} - -const ( - spaceByte = ' ' - tildaByte = '~' - percentByte = '%' -) - -// encodeGrpcMessage is used to encode status code in header field -// "grpc-message". -// It checks to see if each individual byte in msg is an -// allowable byte, and then either percent encoding or passing it through. -// When percent encoding, the byte is converted into hexadecimal notation -// with a '%' prepended. -func encodeGrpcMessage(msg string) string { - if msg == "" { - return "" - } - lenMsg := len(msg) - for i := 0; i < lenMsg; i++ { - c := msg[i] - if !(c >= spaceByte && c < tildaByte && c != percentByte) { - return encodeGrpcMessageUnchecked(msg) - } - } - return msg -} - -func encodeGrpcMessageUnchecked(msg string) string { - var buf bytes.Buffer - lenMsg := len(msg) - for i := 0; i < lenMsg; i++ { - c := msg[i] - if c >= spaceByte && c < tildaByte && c != percentByte { - buf.WriteByte(c) - } else { - buf.WriteString(fmt.Sprintf("%%%02X", c)) - } - } - return buf.String() -} - -// decodeGrpcMessage decodes the msg encoded by encodeGrpcMessage. -func decodeGrpcMessage(msg string) string { - if msg == "" { - return "" - } - lenMsg := len(msg) - for i := 0; i < lenMsg; i++ { - if msg[i] == percentByte && i+2 < lenMsg { - return decodeGrpcMessageUnchecked(msg) - } - } - return msg -} - -func decodeGrpcMessageUnchecked(msg string) string { - var buf bytes.Buffer - lenMsg := len(msg) - for i := 0; i < lenMsg; i++ { - c := msg[i] - if c == percentByte && i+2 < lenMsg { - parsed, err := strconv.ParseInt(msg[i+1:i+3], 16, 8) - if err != nil { - buf.WriteByte(c) - } else { - buf.WriteByte(byte(parsed)) - i += 2 - } - } else { - buf.WriteByte(c) - } - } - return buf.String() -} - -type framer struct { - numWriters int32 - reader io.Reader - writer *bufio.Writer - fr *http2.Framer -} - -func newFramer(conn net.Conn) *framer { - f := &framer{ - reader: bufio.NewReaderSize(conn, http2IOBufSize), - writer: bufio.NewWriterSize(conn, http2IOBufSize), - } - f.fr = http2.NewFramer(f.writer, f.reader) - f.fr.ReadMetaHeaders = hpack.NewDecoder(http2InitHeaderTableSize, nil) - return f -} - -func (f *framer) adjustNumWriters(i int32) int32 { - return atomic.AddInt32(&f.numWriters, i) -} - -// The following writeXXX functions can only be called when the caller gets -// unblocked from writableChan channel (i.e., owns the privilege to write). 
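decodeTimeout above parses the grpc-timeout header: a decimal value followed by a single unit letter (H, M, S, m, u, n). A standalone re-implementation of the decode direction under the same format assumptions (not the vendored function itself):

package main

import (
	"fmt"
	"strconv"
	"time"
)

// decodeGRPCTimeout converts a wire value such as "250m" into a time.Duration.
func decodeGRPCTimeout(s string) (time.Duration, error) {
	if len(s) < 2 {
		return 0, fmt.Errorf("timeout string too short: %q", s)
	}
	units := map[byte]time.Duration{
		'H': time.Hour, 'M': time.Minute, 'S': time.Second,
		'm': time.Millisecond, 'u': time.Microsecond, 'n': time.Nanosecond,
	}
	unit, ok := units[s[len(s)-1]]
	if !ok {
		return 0, fmt.Errorf("unrecognized timeout unit: %q", s)
	}
	v, err := strconv.ParseInt(s[:len(s)-1], 10, 64)
	if err != nil {
		return 0, err
	}
	return unit * time.Duration(v), nil
}

func main() {
	d, err := decodeGRPCTimeout("250m") // 250 milliseconds on the wire
	fmt.Println(d, err)                 // 250ms <nil>
}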
- -func (f *framer) writeContinuation(forceFlush bool, streamID uint32, endHeaders bool, headerBlockFragment []byte) error { - if err := f.fr.WriteContinuation(streamID, endHeaders, headerBlockFragment); err != nil { - return err - } - if forceFlush { - return f.writer.Flush() - } - return nil -} - -func (f *framer) writeData(forceFlush bool, streamID uint32, endStream bool, data []byte) error { - if err := f.fr.WriteData(streamID, endStream, data); err != nil { - return err - } - if forceFlush { - return f.writer.Flush() - } - return nil -} - -func (f *framer) writeGoAway(forceFlush bool, maxStreamID uint32, code http2.ErrCode, debugData []byte) error { - if err := f.fr.WriteGoAway(maxStreamID, code, debugData); err != nil { - return err - } - if forceFlush { - return f.writer.Flush() - } - return nil -} - -func (f *framer) writeHeaders(forceFlush bool, p http2.HeadersFrameParam) error { - if err := f.fr.WriteHeaders(p); err != nil { - return err - } - if forceFlush { - return f.writer.Flush() - } - return nil -} - -func (f *framer) writePing(forceFlush, ack bool, data [8]byte) error { - if err := f.fr.WritePing(ack, data); err != nil { - return err - } - if forceFlush { - return f.writer.Flush() - } - return nil -} - -func (f *framer) writePriority(forceFlush bool, streamID uint32, p http2.PriorityParam) error { - if err := f.fr.WritePriority(streamID, p); err != nil { - return err - } - if forceFlush { - return f.writer.Flush() - } - return nil -} - -func (f *framer) writePushPromise(forceFlush bool, p http2.PushPromiseParam) error { - if err := f.fr.WritePushPromise(p); err != nil { - return err - } - if forceFlush { - return f.writer.Flush() - } - return nil -} - -func (f *framer) writeRSTStream(forceFlush bool, streamID uint32, code http2.ErrCode) error { - if err := f.fr.WriteRSTStream(streamID, code); err != nil { - return err - } - if forceFlush { - return f.writer.Flush() - } - return nil -} - -func (f *framer) writeSettings(forceFlush bool, settings ...http2.Setting) error { - if err := f.fr.WriteSettings(settings...); err != nil { - return err - } - if forceFlush { - return f.writer.Flush() - } - return nil -} - -func (f *framer) writeSettingsAck(forceFlush bool) error { - if err := f.fr.WriteSettingsAck(); err != nil { - return err - } - if forceFlush { - return f.writer.Flush() - } - return nil -} - -func (f *framer) writeWindowUpdate(forceFlush bool, streamID, incr uint32) error { - if err := f.fr.WriteWindowUpdate(streamID, incr); err != nil { - return err - } - if forceFlush { - return f.writer.Flush() - } - return nil -} - -func (f *framer) flushWrite() error { - return f.writer.Flush() -} - -func (f *framer) readFrame() (http2.Frame, error) { - return f.fr.ReadFrame() -} - -func (f *framer) errorDetail() error { - return f.fr.ErrorDetail() -} diff --git a/vendor/google.golang.org/grpc/transport/pre_go16.go b/vendor/google.golang.org/grpc/transport/pre_go16.go deleted file mode 100644 index 33d91c17c4..0000000000 --- a/vendor/google.golang.org/grpc/transport/pre_go16.go +++ /dev/null @@ -1,51 +0,0 @@ -// +build !go1.6 - -/* - * Copyright 2016, Google Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. 
- * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - */ - -package transport - -import ( - "net" - "time" - - "golang.org/x/net/context" -) - -// dialContext connects to the address on the named network. -func dialContext(ctx context.Context, network, address string) (net.Conn, error) { - var dialer net.Dialer - if deadline, ok := ctx.Deadline(); ok { - dialer.Timeout = deadline.Sub(time.Now()) - } - return dialer.Dial(network, address) -} diff --git a/vendor/google.golang.org/grpc/transport/transport.go b/vendor/google.golang.org/grpc/transport/transport.go deleted file mode 100644 index b31769abe1..0000000000 --- a/vendor/google.golang.org/grpc/transport/transport.go +++ /dev/null @@ -1,578 +0,0 @@ -/* - * - * Copyright 2014, Google Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following disclaimer - * in the documentation and/or other materials provided with the - * distribution. - * * Neither the name of Google Inc. nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- * - */ - -/* -Package transport defines and implements message oriented communication channel -to complete various transactions (e.g., an RPC). -*/ -package transport // import "google.golang.org/grpc/transport" - -import ( - "bytes" - "errors" - "fmt" - "io" - "net" - "sync" - - "golang.org/x/net/context" - "golang.org/x/net/trace" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/credentials" - "google.golang.org/grpc/metadata" -) - -// recvMsg represents the received msg from the transport. All transport -// protocol specific info has been removed. -type recvMsg struct { - data []byte - // nil: received some data - // io.EOF: stream is completed. data is nil. - // other non-nil error: transport failure. data is nil. - err error -} - -func (*recvMsg) item() {} - -// All items in an out of a recvBuffer should be the same type. -type item interface { - item() -} - -// recvBuffer is an unbounded channel of item. -type recvBuffer struct { - c chan item - mu sync.Mutex - backlog []item -} - -func newRecvBuffer() *recvBuffer { - b := &recvBuffer{ - c: make(chan item, 1), - } - return b -} - -func (b *recvBuffer) put(r item) { - b.mu.Lock() - defer b.mu.Unlock() - if len(b.backlog) == 0 { - select { - case b.c <- r: - return - default: - } - } - b.backlog = append(b.backlog, r) -} - -func (b *recvBuffer) load() { - b.mu.Lock() - defer b.mu.Unlock() - if len(b.backlog) > 0 { - select { - case b.c <- b.backlog[0]: - b.backlog = b.backlog[1:] - default: - } - } -} - -// get returns the channel that receives an item in the buffer. -// -// Upon receipt of an item, the caller should call load to send another -// item onto the channel if there is any. -func (b *recvBuffer) get() <-chan item { - return b.c -} - -// recvBufferReader implements io.Reader interface to read the data from -// recvBuffer. -type recvBufferReader struct { - ctx context.Context - goAway chan struct{} - recv *recvBuffer - last *bytes.Reader // Stores the remaining data in the previous calls. - err error -} - -// Read reads the next len(p) bytes from last. If last is drained, it tries to -// read additional data from recv. It blocks if there no additional data available -// in recv. If Read returns any non-nil error, it will continue to return that error. -func (r *recvBufferReader) Read(p []byte) (n int, err error) { - if r.err != nil { - return 0, r.err - } - defer func() { r.err = err }() - if r.last != nil && r.last.Len() > 0 { - // Read remaining data left in last call. - return r.last.Read(p) - } - select { - case <-r.ctx.Done(): - return 0, ContextErr(r.ctx.Err()) - case <-r.goAway: - return 0, ErrStreamDrain - case i := <-r.recv.get(): - r.recv.load() - m := i.(*recvMsg) - if m.err != nil { - return 0, m.err - } - r.last = bytes.NewReader(m.data) - return r.last.Read(p) - } -} - -type streamState uint8 - -const ( - streamActive streamState = iota - streamWriteDone // EndStream sent - streamReadDone // EndStream received - streamDone // the entire stream is finished. -) - -// Stream represents an RPC in the transport layer. -type Stream struct { - id uint32 - // nil for client side Stream. - st ServerTransport - // ctx is the associated context of the stream. - ctx context.Context - cancel context.CancelFunc - // done is closed when the final status arrives. - done chan struct{} - // goAway is closed when the server sent GoAways signal before this stream was initiated. - goAway chan struct{} - // method records the associated RPC method of the stream. 
- method string - recvCompress string - sendCompress string - buf *recvBuffer - dec io.Reader - fc *inFlow - recvQuota uint32 - // The accumulated inbound quota pending for window update. - updateQuota uint32 - // The handler to control the window update procedure for both this - // particular stream and the associated transport. - windowHandler func(int) - - sendQuotaPool *quotaPool - // Close headerChan to indicate the end of reception of header metadata. - headerChan chan struct{} - // header caches the received header metadata. - header metadata.MD - // The key-value map of trailer metadata. - trailer metadata.MD - - mu sync.RWMutex // guard the following - // headerOK becomes true from the first header is about to send. - headerOk bool - state streamState - // true iff headerChan is closed. Used to avoid closing headerChan - // multiple times. - headerDone bool - // the status received from the server. - statusCode codes.Code - statusDesc string -} - -// RecvCompress returns the compression algorithm applied to the inbound -// message. It is empty string if there is no compression applied. -func (s *Stream) RecvCompress() string { - return s.recvCompress -} - -// SetSendCompress sets the compression algorithm to the stream. -func (s *Stream) SetSendCompress(str string) { - s.sendCompress = str -} - -// Done returns a chanel which is closed when it receives the final status -// from the server. -func (s *Stream) Done() <-chan struct{} { - return s.done -} - -// GoAway returns a channel which is closed when the server sent GoAways signal -// before this stream was initiated. -func (s *Stream) GoAway() <-chan struct{} { - return s.goAway -} - -// Header acquires the key-value pairs of header metadata once it -// is available. It blocks until i) the metadata is ready or ii) there is no -// header metadata or iii) the stream is cancelled/expired. -func (s *Stream) Header() (metadata.MD, error) { - select { - case <-s.ctx.Done(): - return nil, ContextErr(s.ctx.Err()) - case <-s.goAway: - return nil, ErrStreamDrain - case <-s.headerChan: - return s.header.Copy(), nil - } -} - -// Trailer returns the cached trailer metedata. Note that if it is not called -// after the entire stream is done, it could return an empty MD. Client -// side only. -func (s *Stream) Trailer() metadata.MD { - s.mu.RLock() - defer s.mu.RUnlock() - return s.trailer.Copy() -} - -// ServerTransport returns the underlying ServerTransport for the stream. -// The client side stream always returns nil. -func (s *Stream) ServerTransport() ServerTransport { - return s.st -} - -// Context returns the context of the stream. -func (s *Stream) Context() context.Context { - return s.ctx -} - -// TraceContext recreates the context of s with a trace.Trace. -func (s *Stream) TraceContext(tr trace.Trace) { - s.ctx = trace.NewContext(s.ctx, tr) -} - -// Method returns the method for the stream. -func (s *Stream) Method() string { - return s.method -} - -// StatusCode returns statusCode received from the server. -func (s *Stream) StatusCode() codes.Code { - return s.statusCode -} - -// StatusDesc returns statusDesc received from the server. -func (s *Stream) StatusDesc() string { - return s.statusDesc -} - -// ErrIllegalTrailerSet indicates that the trailer has already been set or it -// is too late to do so. -var ErrIllegalTrailerSet = errors.New("transport: trailer has been set") - -// SetTrailer sets the trailer metadata which will be sent with the RPC status -// by the server. This can only be called at most once. Server side only. 
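recvBuffer and recvBufferReader above give the transport an unbounded hand-off between the frame-reading goroutine and the application: a one-slot channel plus a mutex-guarded backlog, so put never blocks and load refills the channel after every receive. A standalone sketch of the same pattern (unboundedBuffer is an illustrative name):

package main

import (
	"fmt"
	"sync"
)

type unboundedBuffer struct {
	c       chan string
	mu      sync.Mutex
	backlog []string
}

func newUnboundedBuffer() *unboundedBuffer {
	return &unboundedBuffer{c: make(chan string, 1)}
}

// put never blocks: it sends directly when the channel has room and the
// backlog is empty, and appends to the backlog otherwise.
func (b *unboundedBuffer) put(v string) {
	b.mu.Lock()
	defer b.mu.Unlock()
	if len(b.backlog) == 0 {
		select {
		case b.c <- v:
			return
		default:
		}
	}
	b.backlog = append(b.backlog, v)
}

// load moves one backlogged item into the channel, if there is room.
func (b *unboundedBuffer) load() {
	b.mu.Lock()
	defer b.mu.Unlock()
	if len(b.backlog) > 0 {
		select {
		case b.c <- b.backlog[0]:
			b.backlog = b.backlog[1:]
		default:
		}
	}
}

func main() {
	b := newUnboundedBuffer()
	b.put("a")
	b.put("b")         // does not block even though the channel holds only one item
	fmt.Println(<-b.c) // a
	b.load()
	fmt.Println(<-b.c) // b
}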
-func (s *Stream) SetTrailer(md metadata.MD) error { - s.mu.Lock() - defer s.mu.Unlock() - if s.trailer != nil { - return ErrIllegalTrailerSet - } - s.trailer = md.Copy() - return nil -} - -func (s *Stream) write(m recvMsg) { - s.buf.put(&m) -} - -// Read reads all the data available for this Stream from the transport and -// passes them into the decoder, which converts them into a gRPC message stream. -// The error is io.EOF when the stream is done or another non-nil error if -// the stream broke. -func (s *Stream) Read(p []byte) (n int, err error) { - n, err = s.dec.Read(p) - if err != nil { - return - } - s.windowHandler(n) - return -} - -// The key to save transport.Stream in the context. -type streamKey struct{} - -// newContextWithStream creates a new context from ctx and attaches stream -// to it. -func newContextWithStream(ctx context.Context, stream *Stream) context.Context { - return context.WithValue(ctx, streamKey{}, stream) -} - -// StreamFromContext returns the stream saved in ctx. -func StreamFromContext(ctx context.Context) (s *Stream, ok bool) { - s, ok = ctx.Value(streamKey{}).(*Stream) - return -} - -// state of transport -type transportState int - -const ( - reachable transportState = iota - unreachable - closing - draining -) - -// NewServerTransport creates a ServerTransport with conn or non-nil error -// if it fails. -func NewServerTransport(protocol string, conn net.Conn, maxStreams uint32, authInfo credentials.AuthInfo) (ServerTransport, error) { - return newHTTP2Server(conn, maxStreams, authInfo) -} - -// ConnectOptions covers all relevant options for dialing a server. -type ConnectOptions struct { - // UserAgent is the application user agent. - UserAgent string - // Dialer specifies how to dial a network address. - Dialer func(context.Context, string) (net.Conn, error) - // PerRPCCredentials stores the PerRPCCredentials required to issue RPCs. - PerRPCCredentials []credentials.PerRPCCredentials - // TransportCredentials stores the Authenticator required to setup a client connection. - TransportCredentials credentials.TransportCredentials -} - -// NewClientTransport establishes the transport with the required ConnectOptions -// and returns it to the caller. -func NewClientTransport(ctx context.Context, target string, opts ConnectOptions) (ClientTransport, error) { - return newHTTP2Client(ctx, target, opts) -} - -// Options provides additional hints and information for message -// transmission. -type Options struct { - // Last indicates whether this write is the last piece for - // this stream. - Last bool - - // Delay is a hint to the transport implementation for whether - // the data could be buffered for a batching write. The - // Transport implementation may ignore the hint. - Delay bool -} - -// CallHdr carries the information of a particular RPC. -type CallHdr struct { - // Host specifies the peer's host. - Host string - - // Method specifies the operation to perform. - Method string - - // RecvCompress specifies the compression algorithm applied on - // inbound messages. - RecvCompress string - - // SendCompress specifies the compression algorithm applied on - // outbound message. - SendCompress string - - // Flush indicates whether a new stream command should be sent - // to the peer without waiting for the first data. This is - // only a hint. The transport may modify the flush decision - // for performance purposes. - Flush bool -} - -// ClientTransport is the common interface for all gRPC client-side transport -// implementations. 
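newContextWithStream and StreamFromContext above use an unexported key type to stash the server-side Stream in a context without colliding with other packages. A standalone sketch of that pattern, using the standard library context package for brevity (the stream type here is a stand-in, not the vendored Stream):

package main

import (
	"context"
	"fmt"
)

// streamKey is unexported so no other package can overwrite or read the value.
type streamKey struct{}

type stream struct{ method string }

func newContextWithStream(ctx context.Context, s *stream) context.Context {
	return context.WithValue(ctx, streamKey{}, s)
}

func streamFromContext(ctx context.Context) (*stream, bool) {
	s, ok := ctx.Value(streamKey{}).(*stream)
	return s, ok
}

func main() {
	ctx := newContextWithStream(context.Background(), &stream{method: "/svc/Method"})
	if s, ok := streamFromContext(ctx); ok {
		fmt.Println(s.method) // /svc/Method
	}
}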
-type ClientTransport interface { - // Close tears down this transport. Once it returns, the transport - // should not be accessed any more. The caller must make sure this - // is called only once. - Close() error - - // GracefulClose starts to tear down the transport. It stops accepting - // new RPCs and wait the completion of the pending RPCs. - GracefulClose() error - - // Write sends the data for the given stream. A nil stream indicates - // the write is to be performed on the transport as a whole. - Write(s *Stream, data []byte, opts *Options) error - - // NewStream creates a Stream for an RPC. - NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error) - - // CloseStream clears the footprint of a stream when the stream is - // not needed any more. The err indicates the error incurred when - // CloseStream is called. Must be called when a stream is finished - // unless the associated transport is closing. - CloseStream(stream *Stream, err error) - - // Error returns a channel that is closed when some I/O error - // happens. Typically the caller should have a goroutine to monitor - // this in order to take action (e.g., close the current transport - // and create a new one) in error case. It should not return nil - // once the transport is initiated. - Error() <-chan struct{} - - // GoAway returns a channel that is closed when ClientTranspor - // receives the draining signal from the server (e.g., GOAWAY frame in - // HTTP/2). - GoAway() <-chan struct{} -} - -// ServerTransport is the common interface for all gRPC server-side transport -// implementations. -// -// Methods may be called concurrently from multiple goroutines, but -// Write methods for a given Stream will be called serially. -type ServerTransport interface { - // HandleStreams receives incoming streams using the given handler. - HandleStreams(func(*Stream)) - - // WriteHeader sends the header metadata for the given stream. - // WriteHeader may not be called on all streams. - WriteHeader(s *Stream, md metadata.MD) error - - // Write sends the data for the given stream. - // Write may not be called on all streams. - Write(s *Stream, data []byte, opts *Options) error - - // WriteStatus sends the status of a stream to the client. - // WriteStatus is the final call made on a stream and always - // occurs. - WriteStatus(s *Stream, statusCode codes.Code, statusDesc string) error - - // Close tears down the transport. Once it is called, the transport - // should not be accessed any more. All the pending streams and their - // handlers will be terminated asynchronously. - Close() error - - // RemoteAddr returns the remote network address. - RemoteAddr() net.Addr - - // Drain notifies the client this ServerTransport stops accepting new RPCs. - Drain() -} - -// StreamErrorf creates an StreamError with the specified error code and description. -func StreamErrorf(c codes.Code, format string, a ...interface{}) StreamError { - return StreamError{ - Code: c, - Desc: fmt.Sprintf(format, a...), - } -} - -// ConnectionErrorf creates an ConnectionError with the specified error description. -func ConnectionErrorf(temp bool, e error, format string, a ...interface{}) ConnectionError { - return ConnectionError{ - Desc: fmt.Sprintf(format, a...), - temp: temp, - err: e, - } -} - -// ConnectionError is an error that results in the termination of the -// entire connection and the retry of all the active streams. 
-type ConnectionError struct { - Desc string - temp bool - err error -} - -func (e ConnectionError) Error() string { - return fmt.Sprintf("connection error: desc = %q", e.Desc) -} - -// Temporary indicates if this connection error is temporary or fatal. -func (e ConnectionError) Temporary() bool { - return e.temp -} - -// Origin returns the original error of this connection error. -func (e ConnectionError) Origin() error { - // Never return nil error here. - // If the original error is nil, return itself. - if e.err == nil { - return e - } - return e.err -} - -var ( - // ErrConnClosing indicates that the transport is closing. - ErrConnClosing = ConnectionErrorf(true, nil, "transport is closing") - // ErrStreamDrain indicates that the stream is rejected by the server because - // the server stops accepting new RPCs. - ErrStreamDrain = StreamErrorf(codes.Unavailable, "the server stops accepting new RPCs") -) - -// StreamError is an error that only affects one stream within a connection. -type StreamError struct { - Code codes.Code - Desc string -} - -func (e StreamError) Error() string { - return fmt.Sprintf("stream error: code = %d desc = %q", e.Code, e.Desc) -} - -// ContextErr converts the error from context package into a StreamError. -func ContextErr(err error) StreamError { - switch err { - case context.DeadlineExceeded: - return StreamErrorf(codes.DeadlineExceeded, "%v", err) - case context.Canceled: - return StreamErrorf(codes.Canceled, "%v", err) - } - panic(fmt.Sprintf("Unexpected error from context packet: %v", err)) -} - -// wait blocks until it can receive from ctx.Done, closing, or proceed. -// If it receives from ctx.Done, it returns 0, the StreamError for ctx.Err. -// If it receives from done, it returns 0, io.EOF if ctx is not done; otherwise -// it return the StreamError for ctx.Err. -// If it receives from goAway, it returns 0, ErrStreamDrain. -// If it receives from closing, it returns 0, ErrConnClosing. -// If it receives from proceed, it returns the received integer, nil. -func wait(ctx context.Context, done, goAway, closing <-chan struct{}, proceed <-chan int) (int, error) { - select { - case <-ctx.Done(): - return 0, ContextErr(ctx.Err()) - case <-done: - // User cancellation has precedence. 
- select { - case <-ctx.Done(): - return 0, ContextErr(ctx.Err()) - default: - } - return 0, io.EOF - case <-goAway: - return 0, ErrStreamDrain - case <-closing: - return 0, ErrConnClosing - case i := <-proceed: - return i, nil - } -} diff --git a/vendor/vendor.json b/vendor/vendor.json index b858129b13..65fefcfee0 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -298,96 +298,18 @@ "revision": "b400c2eff1badec7022a8c8f5bea058b6315eed7", "revisionTime": "2016-06-19T19:44:24Z" }, - { - "checksumSHA1": "BoPkYbHfVf/rl8JH6NlxFEqZN/M=", - "path": "golang.org/x/net/http2", - "revision": "6250b412798208e6c90b03b7c4f226de5aa299e2", - "revisionTime": "2016-08-24T22:20:41Z" - }, - { - "checksumSHA1": "EYNaHp7XdLWRydUCE0amEkKAtgk=", - "path": "golang.org/x/net/http2/hpack", - "revision": "6250b412798208e6c90b03b7c4f226de5aa299e2", - "revisionTime": "2016-08-24T22:20:41Z" - }, { "checksumSHA1": "/k7k6eJDkxXx6K9Zpo/OwNm58XM=", "path": "golang.org/x/net/internal/timeseries", "revision": "6250b412798208e6c90b03b7c4f226de5aa299e2", "revisionTime": "2016-08-24T22:20:41Z" }, - { - "checksumSHA1": "yhndhWXMs/VSEDLks4dNyFMQStA=", - "path": "golang.org/x/net/lex/httplex", - "revision": "6250b412798208e6c90b03b7c4f226de5aa299e2", - "revisionTime": "2016-08-24T22:20:41Z" - }, - { - "checksumSHA1": "WpST9lFOHvWrjDVy0GJNqHe+R3E=", - "path": "golang.org/x/net/trace", - "revision": "6250b412798208e6c90b03b7c4f226de5aa299e2", - "revisionTime": "2016-08-24T22:20:41Z" - }, { "checksumSHA1": "8fD/im5Kwvy3JgmxulDTambmE8w=", "path": "golang.org/x/sys/unix", "revision": "a408501be4d17ee978c04a618e7a1b22af058c0e", "revisionTime": "2016-07-03T23:56:20Z" }, - { - "checksumSHA1": "PUYeWv5OXNfb+9eufS8HNddJAjI=", - "path": "google.golang.org/grpc", - "revision": "79b7c349179cdd6efd8bac4a1ce7f01b98c16e9b", - "revisionTime": "2016-08-26T22:36:31Z" - }, - { - "checksumSHA1": "08icuA15HRkdYCt6H+Cs90RPQsY=", - "path": "google.golang.org/grpc/codes", - "revision": "79b7c349179cdd6efd8bac4a1ce7f01b98c16e9b", - "revisionTime": "2016-08-26T22:36:31Z" - }, - { - "checksumSHA1": "sbiWvqfhNKqrDC/nv0W/FaO3EOA=", - "path": "google.golang.org/grpc/credentials", - "revision": "79b7c349179cdd6efd8bac4a1ce7f01b98c16e9b", - "revisionTime": "2016-08-26T22:36:31Z" - }, - { - "checksumSHA1": "3Lt5hNAG8qJAYSsNghR5uA1zQns=", - "path": "google.golang.org/grpc/grpclog", - "revision": "79b7c349179cdd6efd8bac4a1ce7f01b98c16e9b", - "revisionTime": "2016-08-26T22:36:31Z" - }, - { - "checksumSHA1": "T3Q0p8kzvXFnRkMaK/G8mCv6mc0=", - "path": "google.golang.org/grpc/internal", - "revision": "79b7c349179cdd6efd8bac4a1ce7f01b98c16e9b", - "revisionTime": "2016-08-26T22:36:31Z" - }, - { - "checksumSHA1": "rxtneb9jDjKPHNZhC5obfG++6bI=", - "path": "google.golang.org/grpc/metadata", - "revision": "79b7c349179cdd6efd8bac4a1ce7f01b98c16e9b", - "revisionTime": "2016-08-26T22:36:31Z" - }, - { - "checksumSHA1": "4GSUFhOQ0kdFlBH4D5OTeKy78z0=", - "path": "google.golang.org/grpc/naming", - "revision": "79b7c349179cdd6efd8bac4a1ce7f01b98c16e9b", - "revisionTime": "2016-08-26T22:36:31Z" - }, - { - "checksumSHA1": "3RRoLeH6X2//7tVClOVzxW2bY+E=", - "path": "google.golang.org/grpc/peer", - "revision": "79b7c349179cdd6efd8bac4a1ce7f01b98c16e9b", - "revisionTime": "2016-08-26T22:36:31Z" - }, - { - "checksumSHA1": "yC4HCpGcQ++BIojq47rlc4QNPm4=", - "path": "google.golang.org/grpc/transport", - "revision": "79b7c349179cdd6efd8bac4a1ce7f01b98c16e9b", - "revisionTime": "2016-08-26T22:36:31Z" - }, { "checksumSHA1": "JfVmsMwyeeepbdw4q4wpN07BuFg=", "path": "gopkg.in/fsnotify.v1",