mirror of
https://github.com/prometheus/prometheus.git
synced 2024-11-10 07:34:04 -08:00
vendor: add and update proto/grpc vendoring
This commit is contained in:
parent
8a2850b5e1
commit
dc15a6f6ea
11
vendor/github.com/cockroachdb/cmux/CONTRIBUTORS
generated
vendored
Normal file
11
vendor/github.com/cockroachdb/cmux/CONTRIBUTORS
generated
vendored
Normal file
|
@ -0,0 +1,11 @@
|
||||||
|
# The list of people who have contributed code to the cmux repository.
|
||||||
|
#
|
||||||
|
# Auto-generated with:
|
||||||
|
# git log --oneline --pretty=format:'%an <%aE>' | sort -u
|
||||||
|
#
|
||||||
|
Dmitri Shuralyov <shurcooL@gmail.com>
|
||||||
|
Ethan Mosbaugh <emosbaugh@gmail.com>
|
||||||
|
Soheil Hassas Yeganeh <soheil.h.y@gmail.com>
|
||||||
|
Soheil Hassas Yeganeh <soheil@cs.toronto.edu>
|
||||||
|
Tamir Duberstein <tamir@cockroachlabs.com>
|
||||||
|
Tamir Duberstein <tamird@gmail.com>
|
202
vendor/github.com/cockroachdb/cmux/LICENSE
generated
vendored
Normal file
202
vendor/github.com/cockroachdb/cmux/LICENSE
generated
vendored
Normal file
|
@ -0,0 +1,202 @@
|
||||||
|
|
||||||
|
Apache License
|
||||||
|
Version 2.0, January 2004
|
||||||
|
http://www.apache.org/licenses/
|
||||||
|
|
||||||
|
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||||
|
|
||||||
|
1. Definitions.
|
||||||
|
|
||||||
|
"License" shall mean the terms and conditions for use, reproduction,
|
||||||
|
and distribution as defined by Sections 1 through 9 of this document.
|
||||||
|
|
||||||
|
"Licensor" shall mean the copyright owner or entity authorized by
|
||||||
|
the copyright owner that is granting the License.
|
||||||
|
|
||||||
|
"Legal Entity" shall mean the union of the acting entity and all
|
||||||
|
other entities that control, are controlled by, or are under common
|
||||||
|
control with that entity. For the purposes of this definition,
|
||||||
|
"control" means (i) the power, direct or indirect, to cause the
|
||||||
|
direction or management of such entity, whether by contract or
|
||||||
|
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||||
|
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||||
|
|
||||||
|
"You" (or "Your") shall mean an individual or Legal Entity
|
||||||
|
exercising permissions granted by this License.
|
||||||
|
|
||||||
|
"Source" form shall mean the preferred form for making modifications,
|
||||||
|
including but not limited to software source code, documentation
|
||||||
|
source, and configuration files.
|
||||||
|
|
||||||
|
"Object" form shall mean any form resulting from mechanical
|
||||||
|
transformation or translation of a Source form, including but
|
||||||
|
not limited to compiled object code, generated documentation,
|
||||||
|
and conversions to other media types.
|
||||||
|
|
||||||
|
"Work" shall mean the work of authorship, whether in Source or
|
||||||
|
Object form, made available under the License, as indicated by a
|
||||||
|
copyright notice that is included in or attached to the work
|
||||||
|
(an example is provided in the Appendix below).
|
||||||
|
|
||||||
|
"Derivative Works" shall mean any work, whether in Source or Object
|
||||||
|
form, that is based on (or derived from) the Work and for which the
|
||||||
|
editorial revisions, annotations, elaborations, or other modifications
|
||||||
|
represent, as a whole, an original work of authorship. For the purposes
|
||||||
|
of this License, Derivative Works shall not include works that remain
|
||||||
|
separable from, or merely link (or bind by name) to the interfaces of,
|
||||||
|
the Work and Derivative Works thereof.
|
||||||
|
|
||||||
|
"Contribution" shall mean any work of authorship, including
|
||||||
|
the original version of the Work and any modifications or additions
|
||||||
|
to that Work or Derivative Works thereof, that is intentionally
|
||||||
|
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||||
|
or by an individual or Legal Entity authorized to submit on behalf of
|
||||||
|
the copyright owner. For the purposes of this definition, "submitted"
|
||||||
|
means any form of electronic, verbal, or written communication sent
|
||||||
|
to the Licensor or its representatives, including but not limited to
|
||||||
|
communication on electronic mailing lists, source code control systems,
|
||||||
|
and issue tracking systems that are managed by, or on behalf of, the
|
||||||
|
Licensor for the purpose of discussing and improving the Work, but
|
||||||
|
excluding communication that is conspicuously marked or otherwise
|
||||||
|
designated in writing by the copyright owner as "Not a Contribution."
|
||||||
|
|
||||||
|
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||||
|
on behalf of whom a Contribution has been received by Licensor and
|
||||||
|
subsequently incorporated within the Work.
|
||||||
|
|
||||||
|
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
copyright license to reproduce, prepare Derivative Works of,
|
||||||
|
publicly display, publicly perform, sublicense, and distribute the
|
||||||
|
Work and such Derivative Works in Source or Object form.
|
||||||
|
|
||||||
|
3. Grant of Patent License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
(except as stated in this section) patent license to make, have made,
|
||||||
|
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||||
|
where such license applies only to those patent claims licensable
|
||||||
|
by such Contributor that are necessarily infringed by their
|
||||||
|
Contribution(s) alone or by combination of their Contribution(s)
|
||||||
|
with the Work to which such Contribution(s) was submitted. If You
|
||||||
|
institute patent litigation against any entity (including a
|
||||||
|
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||||
|
or a Contribution incorporated within the Work constitutes direct
|
||||||
|
or contributory patent infringement, then any patent licenses
|
||||||
|
granted to You under this License for that Work shall terminate
|
||||||
|
as of the date such litigation is filed.
|
||||||
|
|
||||||
|
4. Redistribution. You may reproduce and distribute copies of the
|
||||||
|
Work or Derivative Works thereof in any medium, with or without
|
||||||
|
modifications, and in Source or Object form, provided that You
|
||||||
|
meet the following conditions:
|
||||||
|
|
||||||
|
(a) You must give any other recipients of the Work or
|
||||||
|
Derivative Works a copy of this License; and
|
||||||
|
|
||||||
|
(b) You must cause any modified files to carry prominent notices
|
||||||
|
stating that You changed the files; and
|
||||||
|
|
||||||
|
(c) You must retain, in the Source form of any Derivative Works
|
||||||
|
that You distribute, all copyright, patent, trademark, and
|
||||||
|
attribution notices from the Source form of the Work,
|
||||||
|
excluding those notices that do not pertain to any part of
|
||||||
|
the Derivative Works; and
|
||||||
|
|
||||||
|
(d) If the Work includes a "NOTICE" text file as part of its
|
||||||
|
distribution, then any Derivative Works that You distribute must
|
||||||
|
include a readable copy of the attribution notices contained
|
||||||
|
within such NOTICE file, excluding those notices that do not
|
||||||
|
pertain to any part of the Derivative Works, in at least one
|
||||||
|
of the following places: within a NOTICE text file distributed
|
||||||
|
as part of the Derivative Works; within the Source form or
|
||||||
|
documentation, if provided along with the Derivative Works; or,
|
||||||
|
within a display generated by the Derivative Works, if and
|
||||||
|
wherever such third-party notices normally appear. The contents
|
||||||
|
of the NOTICE file are for informational purposes only and
|
||||||
|
do not modify the License. You may add Your own attribution
|
||||||
|
notices within Derivative Works that You distribute, alongside
|
||||||
|
or as an addendum to the NOTICE text from the Work, provided
|
||||||
|
that such additional attribution notices cannot be construed
|
||||||
|
as modifying the License.
|
||||||
|
|
||||||
|
You may add Your own copyright statement to Your modifications and
|
||||||
|
may provide additional or different license terms and conditions
|
||||||
|
for use, reproduction, or distribution of Your modifications, or
|
||||||
|
for any such Derivative Works as a whole, provided Your use,
|
||||||
|
reproduction, and distribution of the Work otherwise complies with
|
||||||
|
the conditions stated in this License.
|
||||||
|
|
||||||
|
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||||
|
any Contribution intentionally submitted for inclusion in the Work
|
||||||
|
by You to the Licensor shall be under the terms and conditions of
|
||||||
|
this License, without any additional terms or conditions.
|
||||||
|
Notwithstanding the above, nothing herein shall supersede or modify
|
||||||
|
the terms of any separate license agreement you may have executed
|
||||||
|
with Licensor regarding such Contributions.
|
||||||
|
|
||||||
|
6. Trademarks. This License does not grant permission to use the trade
|
||||||
|
names, trademarks, service marks, or product names of the Licensor,
|
||||||
|
except as required for reasonable and customary use in describing the
|
||||||
|
origin of the Work and reproducing the content of the NOTICE file.
|
||||||
|
|
||||||
|
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||||
|
agreed to in writing, Licensor provides the Work (and each
|
||||||
|
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||||
|
implied, including, without limitation, any warranties or conditions
|
||||||
|
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||||
|
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||||
|
appropriateness of using or redistributing the Work and assume any
|
||||||
|
risks associated with Your exercise of permissions under this License.
|
||||||
|
|
||||||
|
8. Limitation of Liability. In no event and under no legal theory,
|
||||||
|
whether in tort (including negligence), contract, or otherwise,
|
||||||
|
unless required by applicable law (such as deliberate and grossly
|
||||||
|
negligent acts) or agreed to in writing, shall any Contributor be
|
||||||
|
liable to You for damages, including any direct, indirect, special,
|
||||||
|
incidental, or consequential damages of any character arising as a
|
||||||
|
result of this License or out of the use or inability to use the
|
||||||
|
Work (including but not limited to damages for loss of goodwill,
|
||||||
|
work stoppage, computer failure or malfunction, or any and all
|
||||||
|
other commercial damages or losses), even if such Contributor
|
||||||
|
has been advised of the possibility of such damages.
|
||||||
|
|
||||||
|
9. Accepting Warranty or Additional Liability. While redistributing
|
||||||
|
the Work or Derivative Works thereof, You may choose to offer,
|
||||||
|
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||||
|
or other liability obligations and/or rights consistent with this
|
||||||
|
License. However, in accepting such obligations, You may act only
|
||||||
|
on Your own behalf and on Your sole responsibility, not on behalf
|
||||||
|
of any other Contributor, and only if You agree to indemnify,
|
||||||
|
defend, and hold each Contributor harmless for any liability
|
||||||
|
incurred by, or claims asserted against, such Contributor by reason
|
||||||
|
of your accepting any such warranty or additional liability.
|
||||||
|
|
||||||
|
END OF TERMS AND CONDITIONS
|
||||||
|
|
||||||
|
APPENDIX: How to apply the Apache License to your work.
|
||||||
|
|
||||||
|
To apply the Apache License to your work, attach the following
|
||||||
|
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||||
|
replaced with your own identifying information. (Don't include
|
||||||
|
the brackets!) The text should be enclosed in the appropriate
|
||||||
|
comment syntax for the file format. We also recommend that a
|
||||||
|
file or class name and description of purpose be included on the
|
||||||
|
same "printed page" as the copyright notice for easier
|
||||||
|
identification within third-party archives.
|
||||||
|
|
||||||
|
Copyright [yyyy] [name of copyright owner]
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
72
vendor/github.com/cockroachdb/cmux/README.md
generated
vendored
Normal file
72
vendor/github.com/cockroachdb/cmux/README.md
generated
vendored
Normal file
|
@ -0,0 +1,72 @@
|
||||||
|
# cmux: Connection Mux [![Build Status](https://travis-ci.org/cockroachdb/cmux.svg?branch=master)](https://travis-ci.org/cockroachdb/cmux) [![GoDoc](https://godoc.org/github.com/cockroachdb/cmux?status.svg)](https://godoc.org/github.com/cockroachdb/cmux)
|
||||||
|
|
||||||
|
cmux is a generic Go library to multiplex connections based on their payload.
|
||||||
|
Using cmux, you can serve gRPC, SSH, HTTPS, HTTP, Go RPC, and pretty much any
|
||||||
|
other protocol on the same TCP listener.
|
||||||
|
|
||||||
|
## How-To
|
||||||
|
Simply create your main listener, create a cmux for that listener,
|
||||||
|
and then match connections:
|
||||||
|
```go
|
||||||
|
// Create the main listener.
|
||||||
|
l, err := net.Listen("tcp", ":23456")
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create a cmux.
|
||||||
|
m := cmux.New(l)
|
||||||
|
|
||||||
|
// Match connections in order:
|
||||||
|
// First grpc, then HTTP, and otherwise Go RPC/TCP.
|
||||||
|
grpcL := m.Match(cmux.HTTP2HeaderField("content-type", "application/grpc"))
|
||||||
|
httpL := m.Match(cmux.HTTP1Fast())
|
||||||
|
trpcL := m.Match(cmux.Any()) // Any means anything that is not yet matched.
|
||||||
|
|
||||||
|
// Create your protocol servers.
|
||||||
|
grpcS := grpc.NewServer()
|
||||||
|
grpchello.RegisterGreeterServer(grpcs, &server{})
|
||||||
|
|
||||||
|
httpS := &http.Server{
|
||||||
|
Handler: &helloHTTP1Handler{},
|
||||||
|
}
|
||||||
|
|
||||||
|
trpcS := rpc.NewServer()
|
||||||
|
s.Register(&ExampleRPCRcvr{})
|
||||||
|
|
||||||
|
// Use the muxed listeners for your servers.
|
||||||
|
go grpcS.Serve(grpcL)
|
||||||
|
go httpS.Serve(httpL)
|
||||||
|
go trpcS.Accept(trpcL)
|
||||||
|
|
||||||
|
// Start serving!
|
||||||
|
m.Serve()
|
||||||
|
```
|
||||||
|
|
||||||
|
There are [more examples on GoDoc](https://godoc.org/github.com/cockroachdb/cmux#pkg-examples).
|
||||||
|
|
||||||
|
## Performance
|
||||||
|
Since we are only matching the very first bytes of a connection, the
|
||||||
|
performance overhead on long-lived connections (i.e., RPCs and pipelined HTTP
|
||||||
|
streams) is negligible.
|
||||||
|
|
||||||
|
## Limitations
|
||||||
|
* *TLS*: `net/http` uses a [type assertion](https://github.com/golang/go/issues/14221)
|
||||||
|
to identify TLS connections; since cmux's lookahead-implementing connection
|
||||||
|
wraps the underlying TLS connection, this type assertion fails. This means you
|
||||||
|
can serve HTTPS using cmux but `http.Request.TLS` will not be set in your
|
||||||
|
handlers. If you are able to wrap TLS around cmux, you can work around this
|
||||||
|
limitation. See https://github.com/cockroachdb/cockroach/commit/83caba2 for an
|
||||||
|
example of this approach.
|
||||||
|
|
||||||
|
* *Different Protocols on The Same Connection*: `cmux` matches the connection
|
||||||
|
when it's accepted. For example, one connection can be either gRPC or REST, but
|
||||||
|
not both. That is, we assume that a client connection is either used for gRPC
|
||||||
|
or REST.
|
||||||
|
|
||||||
|
# Copyright and License
|
||||||
|
Copyright 2016 The CMux Authors. All rights reserved.
|
||||||
|
|
||||||
|
See [CONTRIBUTORS](https://github.com/cockroachdb/cmux/blob/master/CONTRIBUTORS)
|
||||||
|
for the CMux Authors. Code is released under
|
||||||
|
[the Apache 2 license](https://github.com/cockroachdb/cmux/blob/master/LICENSE).
|
49
vendor/github.com/cockroachdb/cmux/buffer.go
generated
vendored
Normal file
49
vendor/github.com/cockroachdb/cmux/buffer.go
generated
vendored
Normal file
|
@ -0,0 +1,49 @@
|
||||||
|
// Copyright 2016 The CMux Authors. All rights reserved.
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||||
|
// implied. See the License for the specific language governing
|
||||||
|
// permissions and limitations under the License.
|
||||||
|
|
||||||
|
package cmux
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"io"
|
||||||
|
)
|
||||||
|
|
||||||
|
// bufferedReader is an optimized implementation of io.Reader that behaves like
|
||||||
|
// ```
|
||||||
|
// io.MultiReader(bytes.NewReader(buffer.Bytes()), io.TeeReader(source, buffer))
|
||||||
|
// ```
|
||||||
|
// without allocating.
|
||||||
|
type bufferedReader struct {
|
||||||
|
source io.Reader
|
||||||
|
buffer *bytes.Buffer
|
||||||
|
bufferRead int
|
||||||
|
bufferSize int
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *bufferedReader) Read(p []byte) (int, error) {
|
||||||
|
// Functionality of bytes.Reader.
|
||||||
|
bn := copy(p, s.buffer.Bytes()[s.bufferRead:s.bufferSize])
|
||||||
|
s.bufferRead += bn
|
||||||
|
|
||||||
|
p = p[bn:]
|
||||||
|
|
||||||
|
// Funtionality of io.TeeReader.
|
||||||
|
sn, sErr := s.source.Read(p)
|
||||||
|
if sn > 0 {
|
||||||
|
if wn, wErr := s.buffer.Write(p[:sn]); wErr != nil {
|
||||||
|
return bn + wn, wErr
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return bn + sn, sErr
|
||||||
|
}
|
224
vendor/github.com/cockroachdb/cmux/cmux.go
generated
vendored
Normal file
224
vendor/github.com/cockroachdb/cmux/cmux.go
generated
vendored
Normal file
|
@ -0,0 +1,224 @@
|
||||||
|
// Copyright 2016 The CMux Authors. All rights reserved.
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||||
|
// implied. See the License for the specific language governing
|
||||||
|
// permissions and limitations under the License.
|
||||||
|
|
||||||
|
package cmux
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"net"
|
||||||
|
"sync"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Matcher matches a connection based on its content.
|
||||||
|
type Matcher func(io.Reader) bool
|
||||||
|
|
||||||
|
// ErrorHandler handles an error and returns whether
|
||||||
|
// the mux should continue serving the listener.
|
||||||
|
type ErrorHandler func(error) bool
|
||||||
|
|
||||||
|
var _ net.Error = ErrNotMatched{}
|
||||||
|
|
||||||
|
// ErrNotMatched is returned whenever a connection is not matched by any of
|
||||||
|
// the matchers registered in the multiplexer.
|
||||||
|
type ErrNotMatched struct {
|
||||||
|
c net.Conn
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e ErrNotMatched) Error() string {
|
||||||
|
return fmt.Sprintf("mux: connection %v not matched by an matcher",
|
||||||
|
e.c.RemoteAddr())
|
||||||
|
}
|
||||||
|
|
||||||
|
// Temporary implements the net.Error interface.
|
||||||
|
func (e ErrNotMatched) Temporary() bool { return true }
|
||||||
|
|
||||||
|
// Timeout implements the net.Error interface.
|
||||||
|
func (e ErrNotMatched) Timeout() bool { return false }
|
||||||
|
|
||||||
|
type errListenerClosed string
|
||||||
|
|
||||||
|
func (e errListenerClosed) Error() string { return string(e) }
|
||||||
|
func (e errListenerClosed) Temporary() bool { return false }
|
||||||
|
func (e errListenerClosed) Timeout() bool { return false }
|
||||||
|
|
||||||
|
// ErrListenerClosed is returned from muxListener.Accept when the underlying
|
||||||
|
// listener is closed.
|
||||||
|
var ErrListenerClosed = errListenerClosed("mux: listener closed")
|
||||||
|
|
||||||
|
// New instantiates a new connection multiplexer.
|
||||||
|
func New(l net.Listener) CMux {
|
||||||
|
return &cMux{
|
||||||
|
root: l,
|
||||||
|
bufLen: 1024,
|
||||||
|
errh: func(_ error) bool { return true },
|
||||||
|
donec: make(chan struct{}),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// CMux is a multiplexer for network connections.
|
||||||
|
type CMux interface {
|
||||||
|
// Match returns a net.Listener that sees (i.e., accepts) only
|
||||||
|
// the connections matched by at least one of the matcher.
|
||||||
|
//
|
||||||
|
// The order used to call Match determines the priority of matchers.
|
||||||
|
Match(...Matcher) net.Listener
|
||||||
|
// Serve starts multiplexing the listener. Serve blocks and perhaps
|
||||||
|
// should be invoked concurrently within a go routine.
|
||||||
|
Serve() error
|
||||||
|
// HandleError registers an error handler that handles listener errors.
|
||||||
|
HandleError(ErrorHandler)
|
||||||
|
}
|
||||||
|
|
||||||
|
type matchersListener struct {
|
||||||
|
ss []Matcher
|
||||||
|
l muxListener
|
||||||
|
}
|
||||||
|
|
||||||
|
type cMux struct {
|
||||||
|
root net.Listener
|
||||||
|
bufLen int
|
||||||
|
errh ErrorHandler
|
||||||
|
donec chan struct{}
|
||||||
|
sls []matchersListener
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *cMux) Match(matchers ...Matcher) net.Listener {
|
||||||
|
ml := muxListener{
|
||||||
|
Listener: m.root,
|
||||||
|
connc: make(chan net.Conn, m.bufLen),
|
||||||
|
}
|
||||||
|
m.sls = append(m.sls, matchersListener{ss: matchers, l: ml})
|
||||||
|
return ml
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *cMux) Serve() error {
|
||||||
|
var wg sync.WaitGroup
|
||||||
|
|
||||||
|
defer func() {
|
||||||
|
close(m.donec)
|
||||||
|
wg.Wait()
|
||||||
|
|
||||||
|
for _, sl := range m.sls {
|
||||||
|
close(sl.l.connc)
|
||||||
|
// Drain the connections enqueued for the listener.
|
||||||
|
for c := range sl.l.connc {
|
||||||
|
_ = c.Close()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
for {
|
||||||
|
c, err := m.root.Accept()
|
||||||
|
if err != nil {
|
||||||
|
if !m.handleErr(err) {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
wg.Add(1)
|
||||||
|
go m.serve(c, m.donec, &wg)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *cMux) serve(c net.Conn, donec <-chan struct{}, wg *sync.WaitGroup) {
|
||||||
|
defer wg.Done()
|
||||||
|
|
||||||
|
muc := newMuxConn(c)
|
||||||
|
for _, sl := range m.sls {
|
||||||
|
for _, s := range sl.ss {
|
||||||
|
matched := s(muc.getSniffer())
|
||||||
|
if matched {
|
||||||
|
select {
|
||||||
|
case sl.l.connc <- muc:
|
||||||
|
case <-donec:
|
||||||
|
_ = c.Close()
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
_ = c.Close()
|
||||||
|
err := ErrNotMatched{c: c}
|
||||||
|
if !m.handleErr(err) {
|
||||||
|
_ = m.root.Close()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *cMux) HandleError(h ErrorHandler) {
|
||||||
|
m.errh = h
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *cMux) handleErr(err error) bool {
|
||||||
|
if !m.errh(err) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
if ne, ok := err.(net.Error); ok {
|
||||||
|
return ne.Temporary()
|
||||||
|
}
|
||||||
|
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
type muxListener struct {
|
||||||
|
net.Listener
|
||||||
|
connc chan net.Conn
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l muxListener) Accept() (net.Conn, error) {
|
||||||
|
c, ok := <-l.connc
|
||||||
|
if !ok {
|
||||||
|
return nil, ErrListenerClosed
|
||||||
|
}
|
||||||
|
return c, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// MuxConn wraps a net.Conn and provides transparent sniffing of connection data.
|
||||||
|
type MuxConn struct {
|
||||||
|
net.Conn
|
||||||
|
buf bytes.Buffer
|
||||||
|
sniffer bufferedReader
|
||||||
|
}
|
||||||
|
|
||||||
|
func newMuxConn(c net.Conn) *MuxConn {
|
||||||
|
return &MuxConn{
|
||||||
|
Conn: c,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// From the io.Reader documentation:
|
||||||
|
//
|
||||||
|
// When Read encounters an error or end-of-file condition after
|
||||||
|
// successfully reading n > 0 bytes, it returns the number of
|
||||||
|
// bytes read. It may return the (non-nil) error from the same call
|
||||||
|
// or return the error (and n == 0) from a subsequent call.
|
||||||
|
// An instance of this general case is that a Reader returning
|
||||||
|
// a non-zero number of bytes at the end of the input stream may
|
||||||
|
// return either err == EOF or err == nil. The next Read should
|
||||||
|
// return 0, EOF.
|
||||||
|
func (m *MuxConn) Read(p []byte) (int, error) {
|
||||||
|
if n, err := m.buf.Read(p); err != io.EOF {
|
||||||
|
return n, err
|
||||||
|
}
|
||||||
|
return m.Conn.Read(p)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *MuxConn) getSniffer() io.Reader {
|
||||||
|
m.sniffer = bufferedReader{source: m.Conn, buffer: &m.buf, bufferSize: m.buf.Len()}
|
||||||
|
return &m.sniffer
|
||||||
|
}
|
18
vendor/github.com/cockroachdb/cmux/doc.go
generated
vendored
Normal file
18
vendor/github.com/cockroachdb/cmux/doc.go
generated
vendored
Normal file
|
@ -0,0 +1,18 @@
|
||||||
|
// Copyright 2016 The CMux Authors. All rights reserved.
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||||
|
// implied. See the License for the specific language governing
|
||||||
|
// permissions and limitations under the License.
|
||||||
|
|
||||||
|
// Package cmux is a library to multiplex network connections based on
|
||||||
|
// their payload. Using cmux, you can serve different protocols from the
|
||||||
|
// same listener.
|
||||||
|
package cmux
|
164
vendor/github.com/cockroachdb/cmux/matchers.go
generated
vendored
Normal file
164
vendor/github.com/cockroachdb/cmux/matchers.go
generated
vendored
Normal file
|
@ -0,0 +1,164 @@
|
||||||
|
// Copyright 2016 The CMux Authors. All rights reserved.
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||||
|
// implied. See the License for the specific language governing
|
||||||
|
// permissions and limitations under the License.
|
||||||
|
|
||||||
|
package cmux
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bufio"
|
||||||
|
"io"
|
||||||
|
"io/ioutil"
|
||||||
|
"net/http"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"golang.org/x/net/http2"
|
||||||
|
"golang.org/x/net/http2/hpack"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Any is a Matcher that matches any connection.
|
||||||
|
func Any() Matcher {
|
||||||
|
return func(r io.Reader) bool { return true }
|
||||||
|
}
|
||||||
|
|
||||||
|
// PrefixMatcher returns a matcher that matches a connection if it
|
||||||
|
// starts with any of the strings in strs.
|
||||||
|
func PrefixMatcher(strs ...string) Matcher {
|
||||||
|
pt := newPatriciaTreeString(strs...)
|
||||||
|
return pt.matchPrefix
|
||||||
|
}
|
||||||
|
|
||||||
|
var defaultHTTPMethods = []string{
|
||||||
|
"OPTIONS",
|
||||||
|
"GET",
|
||||||
|
"HEAD",
|
||||||
|
"POST",
|
||||||
|
"PUT",
|
||||||
|
"DELETE",
|
||||||
|
"TRACE",
|
||||||
|
"CONNECT",
|
||||||
|
}
|
||||||
|
|
||||||
|
// HTTP1Fast only matches the methods in the HTTP request.
|
||||||
|
//
|
||||||
|
// This matcher is very optimistic: if it returns true, it does not mean that
|
||||||
|
// the request is a valid HTTP response. If you want a correct but slower HTTP1
|
||||||
|
// matcher, use HTTP1 instead.
|
||||||
|
func HTTP1Fast(extMethods ...string) Matcher {
|
||||||
|
return PrefixMatcher(append(defaultHTTPMethods, extMethods...)...)
|
||||||
|
}
|
||||||
|
|
||||||
|
const maxHTTPRead = 4096
|
||||||
|
|
||||||
|
// HTTP1 parses the first line or upto 4096 bytes of the request to see if
|
||||||
|
// the conection contains an HTTP request.
|
||||||
|
func HTTP1() Matcher {
|
||||||
|
return func(r io.Reader) bool {
|
||||||
|
br := bufio.NewReader(&io.LimitedReader{R: r, N: maxHTTPRead})
|
||||||
|
l, part, err := br.ReadLine()
|
||||||
|
if err != nil || part {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
_, _, proto, ok := parseRequestLine(string(l))
|
||||||
|
if !ok {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
v, _, ok := http.ParseHTTPVersion(proto)
|
||||||
|
return ok && v == 1
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// grabbed from net/http.
|
||||||
|
func parseRequestLine(line string) (method, uri, proto string, ok bool) {
|
||||||
|
s1 := strings.Index(line, " ")
|
||||||
|
s2 := strings.Index(line[s1+1:], " ")
|
||||||
|
if s1 < 0 || s2 < 0 {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
s2 += s1 + 1
|
||||||
|
return line[:s1], line[s1+1 : s2], line[s2+1:], true
|
||||||
|
}
|
||||||
|
|
||||||
|
// HTTP2 parses the frame header of the first frame to detect whether the
|
||||||
|
// connection is an HTTP2 connection.
|
||||||
|
func HTTP2() Matcher {
|
||||||
|
return hasHTTP2Preface
|
||||||
|
}
|
||||||
|
|
||||||
|
// HTTP1HeaderField returns a matcher matching the header fields of the first
|
||||||
|
// request of an HTTP 1 connection.
|
||||||
|
func HTTP1HeaderField(name, value string) Matcher {
|
||||||
|
return func(r io.Reader) bool {
|
||||||
|
return matchHTTP1Field(r, name, value)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// HTTP2HeaderField resturns a matcher matching the header fields of the first
|
||||||
|
// headers frame.
|
||||||
|
func HTTP2HeaderField(name, value string) Matcher {
|
||||||
|
return func(r io.Reader) bool {
|
||||||
|
return matchHTTP2Field(r, name, value)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func hasHTTP2Preface(r io.Reader) bool {
|
||||||
|
var b [len(http2.ClientPreface)]byte
|
||||||
|
if _, err := io.ReadFull(r, b[:]); err != nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
return string(b[:]) == http2.ClientPreface
|
||||||
|
}
|
||||||
|
|
||||||
|
func matchHTTP1Field(r io.Reader, name, value string) (matched bool) {
|
||||||
|
req, err := http.ReadRequest(bufio.NewReader(r))
|
||||||
|
if err != nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
return req.Header.Get(name) == value
|
||||||
|
}
|
||||||
|
|
||||||
|
func matchHTTP2Field(r io.Reader, name, value string) (matched bool) {
|
||||||
|
if !hasHTTP2Preface(r) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
framer := http2.NewFramer(ioutil.Discard, r)
|
||||||
|
hdec := hpack.NewDecoder(uint32(4<<10), func(hf hpack.HeaderField) {
|
||||||
|
if hf.Name == name && hf.Value == value {
|
||||||
|
matched = true
|
||||||
|
}
|
||||||
|
})
|
||||||
|
for {
|
||||||
|
f, err := framer.ReadFrame()
|
||||||
|
if err != nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
switch f := f.(type) {
|
||||||
|
case *http2.HeadersFrame:
|
||||||
|
if _, err := hdec.Write(f.HeaderBlockFragment()); err != nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if matched {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
if f.FrameHeader.Flags&http2.FlagHeadersEndHeaders != 0 {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
179
vendor/github.com/cockroachdb/cmux/patricia.go
generated
vendored
Normal file
179
vendor/github.com/cockroachdb/cmux/patricia.go
generated
vendored
Normal file
|
@ -0,0 +1,179 @@
|
||||||
|
// Copyright 2016 The CMux Authors. All rights reserved.
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||||
|
// implied. See the License for the specific language governing
|
||||||
|
// permissions and limitations under the License.
|
||||||
|
|
||||||
|
package cmux
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"io"
|
||||||
|
)
|
||||||
|
|
||||||
|
// patriciaTree is a simple patricia tree that handles []byte instead of string
|
||||||
|
// and cannot be changed after instantiation.
|
||||||
|
type patriciaTree struct {
|
||||||
|
root *ptNode
|
||||||
|
maxDepth int // max depth of the tree.
|
||||||
|
}
|
||||||
|
|
||||||
|
func newPatriciaTree(bs ...[]byte) *patriciaTree {
|
||||||
|
max := 0
|
||||||
|
for _, b := range bs {
|
||||||
|
if max < len(b) {
|
||||||
|
max = len(b)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return &patriciaTree{
|
||||||
|
root: newNode(bs),
|
||||||
|
maxDepth: max + 1,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func newPatriciaTreeString(strs ...string) *patriciaTree {
|
||||||
|
b := make([][]byte, len(strs))
|
||||||
|
for i, s := range strs {
|
||||||
|
b[i] = []byte(s)
|
||||||
|
}
|
||||||
|
return newPatriciaTree(b...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *patriciaTree) matchPrefix(r io.Reader) bool {
|
||||||
|
buf := make([]byte, t.maxDepth)
|
||||||
|
n, _ := io.ReadFull(r, buf)
|
||||||
|
return t.root.match(buf[:n], true)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t *patriciaTree) match(r io.Reader) bool {
|
||||||
|
buf := make([]byte, t.maxDepth)
|
||||||
|
n, _ := io.ReadFull(r, buf)
|
||||||
|
return t.root.match(buf[:n], false)
|
||||||
|
}
|
||||||
|
|
||||||
|
type ptNode struct {
|
||||||
|
prefix []byte
|
||||||
|
next map[byte]*ptNode
|
||||||
|
terminal bool
|
||||||
|
}
|
||||||
|
|
||||||
|
func newNode(strs [][]byte) *ptNode {
|
||||||
|
if len(strs) == 0 {
|
||||||
|
return &ptNode{
|
||||||
|
prefix: []byte{},
|
||||||
|
terminal: true,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(strs) == 1 {
|
||||||
|
return &ptNode{
|
||||||
|
prefix: strs[0],
|
||||||
|
terminal: true,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
p, strs := splitPrefix(strs)
|
||||||
|
n := &ptNode{
|
||||||
|
prefix: p,
|
||||||
|
}
|
||||||
|
|
||||||
|
nexts := make(map[byte][][]byte)
|
||||||
|
for _, s := range strs {
|
||||||
|
if len(s) == 0 {
|
||||||
|
n.terminal = true
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
nexts[s[0]] = append(nexts[s[0]], s[1:])
|
||||||
|
}
|
||||||
|
|
||||||
|
n.next = make(map[byte]*ptNode)
|
||||||
|
for first, rests := range nexts {
|
||||||
|
n.next[first] = newNode(rests)
|
||||||
|
}
|
||||||
|
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
|
||||||
|
func splitPrefix(bss [][]byte) (prefix []byte, rest [][]byte) {
|
||||||
|
if len(bss) == 0 || len(bss[0]) == 0 {
|
||||||
|
return prefix, bss
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(bss) == 1 {
|
||||||
|
return bss[0], [][]byte{{}}
|
||||||
|
}
|
||||||
|
|
||||||
|
for i := 0; ; i++ {
|
||||||
|
var cur byte
|
||||||
|
eq := true
|
||||||
|
for j, b := range bss {
|
||||||
|
if len(b) <= i {
|
||||||
|
eq = false
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
if j == 0 {
|
||||||
|
cur = b[i]
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if cur != b[i] {
|
||||||
|
eq = false
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if !eq {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
prefix = append(prefix, cur)
|
||||||
|
}
|
||||||
|
|
||||||
|
rest = make([][]byte, 0, len(bss))
|
||||||
|
for _, b := range bss {
|
||||||
|
rest = append(rest, b[len(prefix):])
|
||||||
|
}
|
||||||
|
|
||||||
|
return prefix, rest
|
||||||
|
}
|
||||||
|
|
||||||
|
func (n *ptNode) match(b []byte, prefix bool) bool {
|
||||||
|
l := len(n.prefix)
|
||||||
|
if l > 0 {
|
||||||
|
if l > len(b) {
|
||||||
|
l = len(b)
|
||||||
|
}
|
||||||
|
if !bytes.Equal(b[:l], n.prefix) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if n.terminal && (prefix || len(n.prefix) == len(b)) {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
if l >= len(b) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
nextN, ok := n.next[b[l]]
|
||||||
|
if !ok {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
if l == len(b) {
|
||||||
|
b = b[l:l]
|
||||||
|
} else {
|
||||||
|
b = b[l+1:]
|
||||||
|
}
|
||||||
|
return nextN.match(b, prefix)
|
||||||
|
}
|
419
vendor/github.com/cockroachdb/cockroach/LICENSE
generated
vendored
Normal file
419
vendor/github.com/cockroachdb/cockroach/LICENSE
generated
vendored
Normal file
|
@ -0,0 +1,419 @@
|
||||||
|
CockroachDB Community License Agreement
|
||||||
|
|
||||||
|
Please read this CockroachDB Community License Agreement (the "Agreement")
|
||||||
|
carefully before using CockroachDB (as defined below), which is offered by
|
||||||
|
Cockroach Labs, Inc. or its affiliated Legal Entities ("Cockroach Labs").
|
||||||
|
|
||||||
|
By downloading CockroachDB or using it in any manner, You agree that You have
|
||||||
|
read and agree to be bound by the terms of this Agreement. If You are
|
||||||
|
accessing CockroachDB on behalf of a Legal Entity, You represent and warrant
|
||||||
|
that You have the authority to agree to these terms on its behalf and the
|
||||||
|
right to bind that Legal Entity to this Agreement. Use of CockroachDB is
|
||||||
|
expressly conditioned upon Your assent to all the terms of this Agreement, to
|
||||||
|
the exclusion of all other terms.
|
||||||
|
|
||||||
|
1. Definitions. In addition to other terms defined elsewhere in this
|
||||||
|
Agreement, the terms below have the following meanings.
|
||||||
|
|
||||||
|
(a) "CockroachDB" shall mean the SQL database software provided by Cockroach
|
||||||
|
Labs, including both CockroachDB Community and CockroachDB Enterprise
|
||||||
|
editions, as defined below.
|
||||||
|
|
||||||
|
(b) "CockroachDB Community Edition" shall mean the open source version of
|
||||||
|
CockroachDB, available free of charge at
|
||||||
|
|
||||||
|
https://github.com/cockroachdb/cockroach
|
||||||
|
|
||||||
|
(c) "CockroachDB Enterprise Edition" shall mean the additional features made
|
||||||
|
available by Cockroach Labs, the use of which is subject to additional
|
||||||
|
terms set out below.
|
||||||
|
|
||||||
|
(d) "Contribution" shall mean any work of authorship, including the original
|
||||||
|
version of the Work and any modifications or additions to that Work or
|
||||||
|
Derivative Works thereof, that is intentionally submitted Cockroach Labs
|
||||||
|
for inclusion in the Work by the copyright owner or by an individual or
|
||||||
|
Legal Entity authorized to submit on behalf of the copyright owner. For
|
||||||
|
the purposes of this definition, "submitted" means any form of
|
||||||
|
electronic, verbal, or written communication sent to Cockroach Labs or
|
||||||
|
its representatives, including but not limited to communication on
|
||||||
|
electronic mailing lists, source code control systems, and issue
|
||||||
|
tracking systems that are managed by, or on behalf of, Cockroach Labs
|
||||||
|
for the purpose of discussing and improving the Work, but excluding
|
||||||
|
communication that is conspicuously marked or otherwise designated in
|
||||||
|
writing by the copyright owner as "Not a Contribution."
|
||||||
|
|
||||||
|
(e) "Contributor" shall mean any copyright owner or individual or Legal
|
||||||
|
Entity authorized by the copyright owner, other than Cockroach Labs,
|
||||||
|
from whom Cockroach Labs receives a Contribution that Cockroach Labs
|
||||||
|
subsequently incorporates within the Work.
|
||||||
|
|
||||||
|
(f) "Derivative Works" shall mean any work, whether in Source or Object
|
||||||
|
form, that is based on (or derived from) the Work, such as a
|
||||||
|
translation, abridgement, condensation, or any other recasting,
|
||||||
|
transformation, or adaptation for which the editorial revisions,
|
||||||
|
annotations, elaborations, or other modifications represent, as a whole,
|
||||||
|
an original work of authorship. For the purposes of this License,
|
||||||
|
Derivative Works shall not include works that remain separable from, or
|
||||||
|
merely link (or bind by name) to the interfaces of, the Work and
|
||||||
|
Derivative Works thereof.
|
||||||
|
|
||||||
|
(g) "Legal Entity" shall mean the union of the acting entity and all other
|
||||||
|
entities that control, are controlled by, or are under common control
|
||||||
|
with that entity. For the purposes of this definition, "control" means
|
||||||
|
(i) the power, direct or indirect, to cause the direction or management
|
||||||
|
of such entity, whether by contract or otherwise, or (ii) ownership of
|
||||||
|
fifty percent (50%) or more of the outstanding shares, or (iii)
|
||||||
|
beneficial ownership of such entity.
|
||||||
|
|
||||||
|
(h) "License" shall mean the terms and conditions for use, reproduction, and
|
||||||
|
distribution of a Work as defined by this Agreement.
|
||||||
|
|
||||||
|
(i) "Licensor" shall mean Cockroach Labs or a Contributor, as applicable.
|
||||||
|
|
||||||
|
(j) "Object" form shall mean any form resulting from mechanical
|
||||||
|
transformation or translation of a Source form, including but not
|
||||||
|
limited to compiled object code, generated documentation, and
|
||||||
|
conversions to other media types.
|
||||||
|
|
||||||
|
(k) "Source" form shall mean the preferred form for making modifications,
|
||||||
|
including but not limited to software source code, documentation source,
|
||||||
|
and configuration files.
|
||||||
|
|
||||||
|
(l) "Third Party Works" shall mean Works, including Contributions, and other
|
||||||
|
technology owned by a person or Legal Entity other than Cockroach Labs,
|
||||||
|
as indicated by a copyright notice that is included in or attached to
|
||||||
|
such Works or technology.
|
||||||
|
|
||||||
|
(m) "Work" shall mean the work of authorship, whether in Source or Object
|
||||||
|
form, made available under a License, as indicated by a copyright notice
|
||||||
|
that is included in or attached to the work.
|
||||||
|
|
||||||
|
(n) "You" (or "Your") shall mean an individual or Legal Entity exercising
|
||||||
|
permissions granted by this License.
|
||||||
|
|
||||||
|
2. Licenses.
|
||||||
|
|
||||||
|
(a) License to CockroachDB Community Edition. The License for CockroachDB
|
||||||
|
Community Edition is the Apache License, Version 2.0 ("Apache License").
|
||||||
|
The Apache License includes a grant of patent license, as well as
|
||||||
|
redistribution rights that are contingent on several requirements.
|
||||||
|
Please see
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
for full terms. CockroachDB Community Edition is a no-cost, entry-level
|
||||||
|
license and as such, contains the following disclaimers: NOTWITHSTANDING
|
||||||
|
ANYTHING TO THE CONTRARY HEREIN, COCKROACHDB COMMUNITY EDITION IS
|
||||||
|
PROVIDED "AS IS" AND "AS AVAILABLE", AND ALL EXPRESS OR IMPLIED
|
||||||
|
WARRANTIES ARE EXCLUDED AND DISCLAIMED, INCLUDING WITHOUT LIMITATION THE
|
||||||
|
IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE,
|
||||||
|
NON-INFRINGEMENT, AND ANY WARRANTIES ARISING BY STATUTE OR OTHERWISE IN
|
||||||
|
LAW OR FROM COURSE OF DEALING, COURSE OF PERFORMANCE, OR USE IN TRADE.
|
||||||
|
For clarity, the terms of this Agreement, other than the relevant
|
||||||
|
definitions in Section 1 and this Section 2(a) do not apply to
|
||||||
|
CockroachDB Community Edition.
|
||||||
|
|
||||||
|
(b) License to CockroachDB Enterprise Edition.
|
||||||
|
|
||||||
|
i Grant of Copyright License: Subject to the terms of this Agreement,
|
||||||
|
Licensor hereby grants to You a worldwide, non-exclusive,
|
||||||
|
non-transferable limited license to reproduce, prepare Enterprise
|
||||||
|
Derivative Works (as defined below) of, publicly display, publicly
|
||||||
|
perform, sublicense, and distribute CockroachDB Enterprise Edition
|
||||||
|
for Your business purposes, for so long as You are not in violation
|
||||||
|
of this Section 2(b) and are current on all payments required by
|
||||||
|
Section 4 below.
|
||||||
|
|
||||||
|
ii Grant of Patent License: Subject to the terms of this Agreement,
|
||||||
|
Licensor hereby grants to You a worldwide, non-exclusive,
|
||||||
|
non-transferable limited patent license to make, have made, use,
|
||||||
|
offer to sell, sell, import, and otherwise transfer CockroachDB
|
||||||
|
Enterprise Edition, where such license applies only to those patent
|
||||||
|
claims licensable by Licensor that are necessarily infringed by
|
||||||
|
their Contribution(s) alone or by combination of their
|
||||||
|
Contribution(s) with the Work to which such Contribution(s) was
|
||||||
|
submitted. If You institute patent litigation against any entity
|
||||||
|
(including a cross-claim or counterclaim in a lawsuit) alleging that
|
||||||
|
the Work or a Contribution incorporated within the Work constitutes
|
||||||
|
direct or contributory patent infringement, then any patent licenses
|
||||||
|
granted to You under this License for that Work shall terminate as
|
||||||
|
of the date such litigation is filed.
|
||||||
|
|
||||||
|
iii License to Third Party Works: From time to time Cockroach Labs may
|
||||||
|
use, or provide You access to, Third Party Works in connection
|
||||||
|
CockroachDB Enterprise Edition. You acknowledge and agree that in
|
||||||
|
addition to this Agreement, Your use of Third Party Works is subject
|
||||||
|
to all other terms and conditions set forth in the License provided
|
||||||
|
with or contained in such Third Party Works. Some Third Party Works
|
||||||
|
may be licensed to You solely for use with CockroachDB Enterprise
|
||||||
|
Edition under the terms of a third party License, or as otherwise
|
||||||
|
notified by Cockroach Labs, and not under the terms of this
|
||||||
|
Agreement. You agree that the owners and third party licensors of
|
||||||
|
Third Party Works are intended third party beneficiaries to this
|
||||||
|
Agreement.
|
||||||
|
|
||||||
|
3. Support. From time to time, in its sole discretion, Cockroach Labs may
|
||||||
|
offer professional services or support for CockroachDB, which may now or in
|
||||||
|
the future be subject to additional fees.
|
||||||
|
|
||||||
|
4. Fees for CockroachDB Enterprise Edition or CockroachDB Support.
|
||||||
|
|
||||||
|
(a) Fees. The License to CockroachDB Enterprise Edition is conditioned upon
|
||||||
|
Your payment of the fees specified on
|
||||||
|
|
||||||
|
https://cockroachlabs.com/pricing
|
||||||
|
|
||||||
|
which You agree to pay to Cockroach Labs in accordance with the payment
|
||||||
|
terms set out on that page. Any professional services or support for
|
||||||
|
CockroachDB may also be subject to Your payment of fees, which will be
|
||||||
|
specified by Cockroach Labs when you sign up to receive such
|
||||||
|
professional services or support. Cockroach Labs reserves the right to
|
||||||
|
change the fees at any time with prior written notice; for recurring
|
||||||
|
fees, any such adjustments will take effect as of the next pay period.
|
||||||
|
|
||||||
|
(b) Overdue Payments and Taxes. Overdue payments are subject to a service
|
||||||
|
charge equal to the lesser of 1.5% per month or the maximum legal
|
||||||
|
interest rate allowed by law, and You shall pay all Cockroach Labs’
|
||||||
|
reasonable costs of collection, including court costs and attorneys’
|
||||||
|
fees. Fees are stated and payable in U.S. dollars and are exclusive of
|
||||||
|
all sales, use, value added and similar taxes, duties, withholdings and
|
||||||
|
other governmental assessments (but excluding taxes based on Cockroach
|
||||||
|
Labs’ income) that may be levied on the transactions contemplated by
|
||||||
|
this Agreement in any jurisdiction, all of which are Your responsibility
|
||||||
|
unless you have provided Cockroach Labs with a valid tax-exempt
|
||||||
|
certificate.
|
||||||
|
|
||||||
|
(c) Record-keeping and Audit. If fees for CockroachDB Enterprise Edition
|
||||||
|
are based on the number of cores or servers running on CockroachDB
|
||||||
|
Enterprise Edition or another use-based unit of measurement, You must
|
||||||
|
maintain complete and accurate records with respect to Your use of
|
||||||
|
CockroachDB Enterprise Edition and will provide such records to
|
||||||
|
Cockroach Labs for inspection or audit upon Cockroach Labs’ reasonable
|
||||||
|
request. If an inspection or audit uncovers additional usage by You for
|
||||||
|
which fees are owed under this Agreement, then You shall pay for such
|
||||||
|
additional usage at Cockroach Labs’ then-current rates.
|
||||||
|
|
||||||
|
5. Trial License. If You have signed up for a trial or evaluation of
|
||||||
|
CockroachDB Enterprise Edition, Your License to CockroachDB Enterprise
|
||||||
|
Edition is granted without charge for the trial or evaluation period
|
||||||
|
specified when You signed up, or if no term was specified, for thirty (30)
|
||||||
|
calendar days, provided that Your License is granted solely for purposes of
|
||||||
|
Your internal evaluation of CockroachDB Enterprise Edition during the trial
|
||||||
|
or evaluation period (a "Trial License"). You may not use CockroachDB
|
||||||
|
Enterprise Edition under a Trial License more than once in any twelve (12)
|
||||||
|
month period. Cockroach Labs may revoke a Trial License at any time and
|
||||||
|
for any reason. Sections 3, 4, 9 and 11 of this Agreement do not apply to
|
||||||
|
Trial Licenses.
|
||||||
|
|
||||||
|
6. Redistribution. You may reproduce and distribute copies of the Work or
|
||||||
|
Derivative Works thereof in any medium, with or without modifications, and
|
||||||
|
in Source or Object form, provided that You meet the following conditions:
|
||||||
|
|
||||||
|
(a) You must give any other recipients of the Work or Derivative Works a
|
||||||
|
copy of this License; and
|
||||||
|
|
||||||
|
(b) You must cause any modified files to carry prominent notices stating
|
||||||
|
that You changed the files; and
|
||||||
|
|
||||||
|
(c) You must retain, in the Source form of any Derivative Works that You
|
||||||
|
distribute, all copyright, patent, trademark, and attribution notices
|
||||||
|
from the Source form of the Work, excluding those notices that do not
|
||||||
|
pertain to any part of the Derivative Works; and
|
||||||
|
|
||||||
|
(d) If the Work includes a "NOTICE" text file as part of its distribution,
|
||||||
|
then any Derivative Works that You distribute must include a readable
|
||||||
|
copy of the attribution notices contained within such NOTICE file,
|
||||||
|
excluding those notices that do not pertain to any part of the
|
||||||
|
Derivative Works, in at least one of the following places: within a
|
||||||
|
NOTICE text file distributed as part of the Derivative Works; within the
|
||||||
|
Source form or documentation, if provided along with the Derivative
|
||||||
|
Works; or, within a display generated by the Derivative Works, if and
|
||||||
|
wherever such third-party notices normally appear. The contents of the
|
||||||
|
NOTICE file are for informational purposes only and do not modify the
|
||||||
|
License. You may add Your own attribution notices within Derivative
|
||||||
|
Works that You distribute, alongside or as an addendum to the NOTICE
|
||||||
|
text from the Work, provided that such additional attribution notices
|
||||||
|
cannot be construed as modifying the License.
|
||||||
|
|
||||||
|
You may add Your own copyright statement to Your modifications and may
|
||||||
|
provide additional or different license terms and conditions for use,
|
||||||
|
reproduction, or distribution of Your modifications, or for any such
|
||||||
|
Derivative Works as a whole, provided Your use, reproduction, and
|
||||||
|
distribution of the Work otherwise complies with the conditions stated
|
||||||
|
in this License.
|
||||||
|
|
||||||
|
(e) Enterprise Derivative Works: Derivative Works of CockroachDB Enterprise
|
||||||
|
Edition ("Enterprise Derivative Works") may be made, reproduced and
|
||||||
|
distributed in any medium, with or without modifications, in Source or
|
||||||
|
Object form, provided that each Enterprise Derivative Work will be
|
||||||
|
considered to include a License to CockroachDB Enterprise Edition and
|
||||||
|
thus will be subject to the payment of fees to Cockroach Labs by any
|
||||||
|
user of the Enterprise Derivative Work.
|
||||||
|
|
||||||
|
7. Submission of Contributions. Unless You explicitly state otherwise, any
|
||||||
|
Contribution intentionally submitted for inclusion in CockroachDB by You to
|
||||||
|
Cockroach Labs shall be under the terms and conditions of
|
||||||
|
|
||||||
|
https://cla-assistant.io/cockroachdb/cockroach
|
||||||
|
|
||||||
|
(which is based off of the Apache License), without any additional terms or
|
||||||
|
conditions, payments of royalties or otherwise to Your benefit.
|
||||||
|
Notwithstanding the above, nothing herein shall supersede or modify the
|
||||||
|
terms of any separate license agreement You may have executed with
|
||||||
|
Cockroach Labs regarding such Contributions.
|
||||||
|
|
||||||
|
8. Trademarks. This License does not grant permission to use the trade names,
|
||||||
|
trademarks, service marks, or product names of Licensor, except as required
|
||||||
|
for reasonable and customary use in describing the origin of the Work and
|
||||||
|
reproducing the content of the NOTICE file.
|
||||||
|
|
||||||
|
9. Limited Warranty.
|
||||||
|
|
||||||
|
(a) Warranties. Cockroach Labs warrants to You that: (i) CockroachDB
|
||||||
|
Enterprise Edition will materially perform in accordance with the
|
||||||
|
applicable documentation for ninety (90) days after initial delivery to
|
||||||
|
You; and (ii) any professional services performed by Cockroach Labs
|
||||||
|
under this Agreement will be performed in a workmanlike manner, in
|
||||||
|
accordance with general industry standards.
|
||||||
|
|
||||||
|
(b) Exclusions. Cockroach Labs’ warranties in this Section 9 do not extend
|
||||||
|
to problems that result from: (i) Your failure to implement updates
|
||||||
|
issued by Cockroach Labs during the warranty period; (ii) any
|
||||||
|
alterations or additions (including Enterprise Derivative Works and
|
||||||
|
Contributions) to CockroachDB not performed by or at the direction of
|
||||||
|
Cockroach Labs; (iii) failures that are not reproducible by Cockroach
|
||||||
|
Labs; (iv) operation of CockroachDB Enterprise Edition in violation of
|
||||||
|
this Agreement or not in accordance with its documentation; (v) failures
|
||||||
|
caused by software, hardware or products not licensed or provided by
|
||||||
|
Cockroach Labs hereunder; or (vi) Third Party Works.
|
||||||
|
|
||||||
|
(c) Remedies. In the event of a breach of a warranty under this Section 9,
|
||||||
|
Cockroach Labs will, at its discretion and cost, either repair, replace
|
||||||
|
or re-perform the applicable Works or services or refund a portion of
|
||||||
|
fees previously paid to Cockroach Labs that are associated with the
|
||||||
|
defective Works or services. This is Your exclusive remedy, and
|
||||||
|
Cockroach Labs’ sole liability, arising in connection with the limited
|
||||||
|
warranties herein.
|
||||||
|
|
||||||
|
10. Disclaimer of Warranty. Except as set out in Section 9, unless required
|
||||||
|
by applicable law, Licensor provides the Work (and each Contributor
|
||||||
|
provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR
|
||||||
|
CONDITIONS OF ANY KIND, either express or implied, arising out of course
|
||||||
|
of dealing, course of performance, or usage in trade, including, without
|
||||||
|
limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT,
|
||||||
|
MERCHANTABILITY, CORRECTNESS, RELIABILITY, or FITNESS FOR A PARTICULAR
|
||||||
|
PURPOSE, all of which are hereby disclaimed. You are solely responsible
|
||||||
|
for determining the appropriateness of using or redistributing Works and
|
||||||
|
assume any risks associated with Your exercise of permissions under the
|
||||||
|
applicable License for such Works.
|
||||||
|
|
||||||
|
11. Limited Indemnity.
|
||||||
|
|
||||||
|
(a) Indemnity. Cockroach Labs will defend, indemnify and hold You harmless
|
||||||
|
against any third party claims, liabilities or expenses incurred
|
||||||
|
(including reasonable attorneys’ fees), as well as amounts finally
|
||||||
|
awarded in a settlement or a non-appealable judgement by a court
|
||||||
|
("Losses"), to the extent arising from any claim or allegation by a
|
||||||
|
third party that CockroachDB Enterprise Edition infringes or
|
||||||
|
misappropriates a valid United States patent, copyright or trade secret
|
||||||
|
right of a third party; provided that You give Cockroach Labs: (i)
|
||||||
|
prompt written notice of any such claim or allegation; (ii) sole control
|
||||||
|
of the defense and settlement thereof; and (iii) reasonable cooperation
|
||||||
|
and assistance in such defense or settlement. If any Work within
|
||||||
|
CockroachDB Enterprise Edition becomes or, in Cockroach Labs’ opinion,
|
||||||
|
is likely to become, the subject of an injunction, Cockroach Labs may,
|
||||||
|
at its option, (A) procure for You the right to continue using such
|
||||||
|
Work, (B) replace or modify such Work so that it becomes non-infringing
|
||||||
|
without substantially compromising its functionality, or, if (A) and (B)
|
||||||
|
are not commercially practicable, then (C) terminate Your license to the
|
||||||
|
allegedly infringing Work and refund to You a prorated portion of the
|
||||||
|
prepaid and unearned fees for such infringing Work. The foregoing
|
||||||
|
states the entire liability of Cockroach Labs with respect to
|
||||||
|
infringement of patents, copyrights, trade secrets or other intellectual
|
||||||
|
property rights.
|
||||||
|
|
||||||
|
(b) Exclusions. The foregoing obligations shall not apply to: (i) Works
|
||||||
|
modified by any party other than Cockroach Labs (including Enterprise
|
||||||
|
Derivative Works and Contributions), if the alleged infringement relates
|
||||||
|
to such modification, (ii) Works combined or bundled with any products,
|
||||||
|
processes or materials not provided by Cockroach Labs where the alleged
|
||||||
|
infringement relates to such combination, (iii) use of a version of
|
||||||
|
CockroachDB Enterprise Edition other than the version that was current
|
||||||
|
at the time of such use, as long as a non-infringing version had been
|
||||||
|
released, (iv) any Works created to Your specifications, (v)
|
||||||
|
infringement or misappropriation of any proprietary right in which You
|
||||||
|
have an interest, or (vi) Third Party Works. You will defend, indemnify
|
||||||
|
and hold Cockroach Labs harmless against any Losses arising from any
|
||||||
|
such claim or allegation, subject to conditions reciprocal to those in
|
||||||
|
Section 11(a).
|
||||||
|
|
||||||
|
12. Limitation of Liability. In no event and under no legal or equitable
|
||||||
|
theory, whether in tort (including negligence), contract, or otherwise,
|
||||||
|
unless required by applicable law (such as deliberate and grossly
|
||||||
|
negligent acts), and notwithstanding anything in this Agreement to the
|
||||||
|
contrary, shall Licensor or any Contributor be liable to You for (i) any
|
||||||
|
amounts in excess, in the aggregate, of the fees paid by You to Cockroach
|
||||||
|
Labs under this Agreement in the twelve (12) months preceding the date the
|
||||||
|
first cause of liability arose), or (ii) any indirect, special,
|
||||||
|
incidental, punitive, exemplary, reliance, or consequential damages of any
|
||||||
|
character arising as a result of this Agreement or out of the use or
|
||||||
|
inability to use the Work (including but not limited to damages for loss
|
||||||
|
of goodwill, profits, data or data use, work stoppage, computer failure or
|
||||||
|
malfunction, cost of procurement of substitute goods, technology or
|
||||||
|
services, or any and all other commercial damages or losses), even if such
|
||||||
|
Licensor or Contributor has been advised of the possibility of such
|
||||||
|
damages. THESE LIMITATIONS SHALL APPLY NOTWITHSTANDING THE FAILURE OF THE
|
||||||
|
ESSENTIAL PURPOSE OF ANY LIMITED REMEDY.
|
||||||
|
|
||||||
|
13. Accepting Warranty or Additional Liability. While redistributing Works or
|
||||||
|
Derivative Works thereof, and without limiting your obligations under
|
||||||
|
Section 6, You may choose to offer, and charge a fee for, acceptance of
|
||||||
|
support, warranty, indemnity, or other liability obligations and/or rights
|
||||||
|
consistent with this License. However, in accepting such obligations, You
|
||||||
|
may act only on Your own behalf and on Your sole responsibility, not on
|
||||||
|
behalf of any other Contributor, and only if You agree to indemnify,
|
||||||
|
defend, and hold Cockroach Labs and each other Contributor harmless for
|
||||||
|
any liability incurred by, or claims asserted against, such Contributor by
|
||||||
|
reason of your accepting any such warranty or additional liability.
|
||||||
|
|
||||||
|
14. General.
|
||||||
|
|
||||||
|
(a) Relationship of Parties. You and Cockroach Labs are independent
|
||||||
|
contractors, and nothing herein shall be deemed to constitute either
|
||||||
|
party as the agent or representative of the other or both parties as
|
||||||
|
joint venturers or partners for any purpose.
|
||||||
|
|
||||||
|
(b) Export Control. You shall comply with the U.S. Foreign Corrupt
|
||||||
|
Practices Act and all applicable export laws, restrictions and
|
||||||
|
regulations of the U.S. Department of Commerce, and any other applicable
|
||||||
|
U.S. and foreign authority.
|
||||||
|
|
||||||
|
(c) Assignment. This Agreement and the rights and obligations herein may
|
||||||
|
not be assigned or transferred, in whole or in part, by You without the
|
||||||
|
prior written consent of Cockroach Labs. Any assignment in violation of
|
||||||
|
this provision is void. This Agreement shall be binding upon, and inure
|
||||||
|
to the benefit of, the successors and permitted assigns of the parties.
|
||||||
|
|
||||||
|
(d) Governing Law. This Agreement shall be governed by and construed under
|
||||||
|
the laws of the State of New York and the United States without regard
|
||||||
|
to conflicts of laws provisions thereof, and without regard to the
|
||||||
|
Uniform Computer Information Transactions Act.
|
||||||
|
|
||||||
|
(e) Attorneys’ Fees. In any action or proceeding to enforce rights under
|
||||||
|
this Agreement, the prevailing party shall be entitled to recover its
|
||||||
|
costs, expenses and attorneys’ fees.
|
||||||
|
|
||||||
|
(f) Severability. If any provision of this Agreement is held to be invalid,
|
||||||
|
illegal or unenforceable in any respect, that provision shall be limited
|
||||||
|
or eliminated to the minimum extent necessary so that this Agreement
|
||||||
|
otherwise remains in full force and effect and enforceable.
|
||||||
|
|
||||||
|
(g) Entire Agreement; Waivers; Modification. This Agreement constitutes the
|
||||||
|
entire agreement between the parties relating to the subject matter
|
||||||
|
hereof and supersedes all proposals, understandings, or discussions,
|
||||||
|
whether written or oral, relating to the subject matter of this
|
||||||
|
Agreement and all past dealing or industry custom. The failure of either
|
||||||
|
party to enforce its rights under this Agreement at any time for any
|
||||||
|
period shall not be construed as a waiver of such rights. No changes,
|
||||||
|
modifications or waivers to this Agreement will be effective unless in
|
||||||
|
writing and signed by both parties.
|
95
vendor/github.com/cockroachdb/cockroach/pkg/util/httputil/http.go
generated
vendored
Normal file
95
vendor/github.com/cockroachdb/cockroach/pkg/util/httputil/http.go
generated
vendored
Normal file
|
@ -0,0 +1,95 @@
|
||||||
|
// Copyright 2014 The Cockroach Authors.
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||||
|
// implied. See the License for the specific language governing
|
||||||
|
// permissions and limitations under the License.
|
||||||
|
//
|
||||||
|
// Author: Spencer Kimball (spencer.kimball@gmail.com)
|
||||||
|
|
||||||
|
package httputil
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"io/ioutil"
|
||||||
|
"net/http"
|
||||||
|
"strconv"
|
||||||
|
|
||||||
|
"github.com/gogo/protobuf/jsonpb"
|
||||||
|
"github.com/gogo/protobuf/proto"
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
// AcceptHeader is the canonical header name for accept.
|
||||||
|
AcceptHeader = "Accept"
|
||||||
|
// AcceptEncodingHeader is the canonical header name for accept encoding.
|
||||||
|
AcceptEncodingHeader = "Accept-Encoding"
|
||||||
|
// ContentEncodingHeader is the canonical header name for content type.
|
||||||
|
ContentEncodingHeader = "Content-Encoding"
|
||||||
|
// ContentTypeHeader is the canonical header name for content type.
|
||||||
|
ContentTypeHeader = "Content-Type"
|
||||||
|
// JSONContentType is the JSON content type.
|
||||||
|
JSONContentType = "application/json"
|
||||||
|
// AltJSONContentType is the alternate JSON content type.
|
||||||
|
AltJSONContentType = "application/x-json"
|
||||||
|
// ProtoContentType is the protobuf content type.
|
||||||
|
ProtoContentType = "application/x-protobuf"
|
||||||
|
// AltProtoContentType is the alternate protobuf content type.
|
||||||
|
AltProtoContentType = "application/x-google-protobuf"
|
||||||
|
// PlaintextContentType is the plaintext content type.
|
||||||
|
PlaintextContentType = "text/plain"
|
||||||
|
// GzipEncoding is the gzip encoding.
|
||||||
|
GzipEncoding = "gzip"
|
||||||
|
)
|
||||||
|
|
||||||
|
// GetJSON uses the supplied client to GET the URL specified by the parameters
|
||||||
|
// and unmarshals the result into response.
|
||||||
|
func GetJSON(httpClient http.Client, path string, response proto.Message) error {
|
||||||
|
req, err := http.NewRequest("GET", path, nil)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return doJSONRequest(httpClient, req, response)
|
||||||
|
}
|
||||||
|
|
||||||
|
// PostJSON uses the supplied client to POST request to the URL specified by
|
||||||
|
// the parameters and unmarshals the result into response.
|
||||||
|
func PostJSON(httpClient http.Client, path string, request, response proto.Message) error {
|
||||||
|
// Hack to avoid upsetting TestProtoMarshal().
|
||||||
|
marshalFn := (&jsonpb.Marshaler{}).Marshal
|
||||||
|
|
||||||
|
var buf bytes.Buffer
|
||||||
|
if err := marshalFn(&buf, request); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
req, err := http.NewRequest("POST", path, &buf)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return doJSONRequest(httpClient, req, response)
|
||||||
|
}
|
||||||
|
|
||||||
|
func doJSONRequest(httpClient http.Client, req *http.Request, response proto.Message) error {
|
||||||
|
if timeout := httpClient.Timeout; timeout > 0 {
|
||||||
|
req.Header.Set("Grpc-Timeout", strconv.FormatInt(timeout.Nanoseconds(), 10)+"n")
|
||||||
|
}
|
||||||
|
req.Header.Set(AcceptHeader, JSONContentType)
|
||||||
|
resp, err := httpClient.Do(req)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
if contentType := resp.Header.Get(ContentTypeHeader); !(resp.StatusCode == http.StatusOK && contentType == JSONContentType) {
|
||||||
|
b, err := ioutil.ReadAll(resp.Body)
|
||||||
|
return errors.Errorf("status: %s, content-type: %s, body: %s, error: %v", resp.Status, contentType, b, err)
|
||||||
|
}
|
||||||
|
return jsonpb.Unmarshal(resp.Body, response)
|
||||||
|
}
|
117
vendor/github.com/cockroachdb/cockroach/pkg/util/protoutil/clone.go
generated
vendored
Normal file
117
vendor/github.com/cockroachdb/cockroach/pkg/util/protoutil/clone.go
generated
vendored
Normal file
|
@ -0,0 +1,117 @@
|
||||||
|
// Copyright 2016 The Cockroach Authors.
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||||
|
// implied. See the License for the specific language governing
|
||||||
|
// permissions and limitations under the License.
|
||||||
|
//
|
||||||
|
// Author: Tamir Duberstein (tamird@gmail.com)
|
||||||
|
|
||||||
|
package protoutil
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"reflect"
|
||||||
|
|
||||||
|
"github.com/cockroachdb/cockroach/pkg/util/syncutil"
|
||||||
|
"github.com/gogo/protobuf/proto"
|
||||||
|
)
|
||||||
|
|
||||||
|
var verbotenKinds = [...]reflect.Kind{
|
||||||
|
reflect.Array,
|
||||||
|
}
|
||||||
|
|
||||||
|
type typeKey struct {
|
||||||
|
typ reflect.Type
|
||||||
|
verboten reflect.Kind
|
||||||
|
}
|
||||||
|
|
||||||
|
var types struct {
|
||||||
|
syncutil.Mutex
|
||||||
|
known map[typeKey]reflect.Type
|
||||||
|
}
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
types.known = make(map[typeKey]reflect.Type)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Clone uses proto.Clone to return a deep copy of pb. It panics if pb
|
||||||
|
// recursively contains any instances of types which are known to be
|
||||||
|
// unsupported by proto.Clone.
|
||||||
|
//
|
||||||
|
// This function and its associated lint (see build/style_test.go) exist to
|
||||||
|
// ensure we do not attempt to proto.Clone types which are not supported by
|
||||||
|
// proto.Clone. This hackery is necessary because proto.Clone gives no direct
|
||||||
|
// indication that it has incompletely cloned a type; it merely logs to standard
|
||||||
|
// output (see
|
||||||
|
// https://github.com/golang/protobuf/blob/89238a3/proto/clone.go#L204).
|
||||||
|
//
|
||||||
|
// The concrete case against which this is currently guarding may be resolved
|
||||||
|
// upstream, see https://github.com/gogo/protobuf/issues/147.
|
||||||
|
func Clone(pb proto.Message) proto.Message {
|
||||||
|
for _, verbotenKind := range verbotenKinds {
|
||||||
|
if t := typeIsOrContainsVerboten(reflect.TypeOf(pb), verbotenKind); t != nil {
|
||||||
|
panic(fmt.Sprintf("attempt to clone %T, which contains uncloneable field of type %s", pb, t))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return proto.Clone(pb)
|
||||||
|
}
|
||||||
|
|
||||||
|
func typeIsOrContainsVerboten(t reflect.Type, verboten reflect.Kind) reflect.Type {
|
||||||
|
types.Lock()
|
||||||
|
defer types.Unlock()
|
||||||
|
|
||||||
|
return typeIsOrContainsVerbotenLocked(t, verboten)
|
||||||
|
}
|
||||||
|
|
||||||
|
func typeIsOrContainsVerbotenLocked(t reflect.Type, verboten reflect.Kind) reflect.Type {
|
||||||
|
key := typeKey{t, verboten}
|
||||||
|
knownTypeIsOrContainsVerboten, ok := types.known[key]
|
||||||
|
if !ok {
|
||||||
|
knownTypeIsOrContainsVerboten = typeIsOrContainsVerbotenImpl(t, verboten)
|
||||||
|
types.known[key] = knownTypeIsOrContainsVerboten
|
||||||
|
}
|
||||||
|
return knownTypeIsOrContainsVerboten
|
||||||
|
}
|
||||||
|
|
||||||
|
func typeIsOrContainsVerbotenImpl(t reflect.Type, verboten reflect.Kind) reflect.Type {
|
||||||
|
switch t.Kind() {
|
||||||
|
case verboten:
|
||||||
|
return t
|
||||||
|
|
||||||
|
case reflect.Map:
|
||||||
|
if key := typeIsOrContainsVerbotenLocked(t.Key(), verboten); key != nil {
|
||||||
|
return key
|
||||||
|
}
|
||||||
|
if value := typeIsOrContainsVerbotenLocked(t.Elem(), verboten); value != nil {
|
||||||
|
return value
|
||||||
|
}
|
||||||
|
|
||||||
|
case reflect.Array, reflect.Ptr, reflect.Slice:
|
||||||
|
if value := typeIsOrContainsVerbotenLocked(t.Elem(), verboten); value != nil {
|
||||||
|
return value
|
||||||
|
}
|
||||||
|
|
||||||
|
case reflect.Struct:
|
||||||
|
for i := 0; i < t.NumField(); i++ {
|
||||||
|
if field := typeIsOrContainsVerbotenLocked(t.Field(i).Type, verboten); field != nil {
|
||||||
|
return field
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
case reflect.Chan, reflect.Func:
|
||||||
|
// Not strictly correct, but cloning these kinds is not allowed.
|
||||||
|
return t
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
128
vendor/github.com/cockroachdb/cockroach/pkg/util/protoutil/jsonpb_marshal.go
generated
vendored
Normal file
128
vendor/github.com/cockroachdb/cockroach/pkg/util/protoutil/jsonpb_marshal.go
generated
vendored
Normal file
|
@ -0,0 +1,128 @@
|
||||||
|
// Copyright 2016 The Cockroach Authors.
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||||
|
// implied. See the License for the specific language governing
|
||||||
|
// permissions and limitations under the License.
|
||||||
|
//
|
||||||
|
// Author: Tamir Duberstein (tamird@gmail.com)
|
||||||
|
|
||||||
|
package protoutil
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"reflect"
|
||||||
|
|
||||||
|
"github.com/gogo/protobuf/jsonpb"
|
||||||
|
"github.com/gogo/protobuf/proto"
|
||||||
|
gwruntime "github.com/grpc-ecosystem/grpc-gateway/runtime"
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
|
||||||
|
"github.com/cockroachdb/cockroach/pkg/util/httputil"
|
||||||
|
)
|
||||||
|
|
||||||
|
var _ gwruntime.Marshaler = (*JSONPb)(nil)
|
||||||
|
|
||||||
|
var typeProtoMessage = reflect.TypeOf((*proto.Message)(nil)).Elem()
|
||||||
|
|
||||||
|
// JSONPb is a gwruntime.Marshaler that uses github.com/gogo/protobuf/jsonpb.
|
||||||
|
type JSONPb jsonpb.Marshaler
|
||||||
|
|
||||||
|
// ContentType implements gwruntime.Marshaler.
|
||||||
|
func (*JSONPb) ContentType() string {
|
||||||
|
return httputil.JSONContentType
|
||||||
|
}
|
||||||
|
|
||||||
|
// Marshal implements gwruntime.Marshaler.
|
||||||
|
func (j *JSONPb) Marshal(v interface{}) ([]byte, error) {
|
||||||
|
return j.marshal(v)
|
||||||
|
}
|
||||||
|
|
||||||
|
// a lower-case version of marshal to allow for a call from
|
||||||
|
// marshalNonProtoField without upsetting TestProtoMarshal().
|
||||||
|
func (j *JSONPb) marshal(v interface{}) ([]byte, error) {
|
||||||
|
if pb, ok := v.(proto.Message); ok {
|
||||||
|
var buf bytes.Buffer
|
||||||
|
marshalFn := (*jsonpb.Marshaler)(j).Marshal
|
||||||
|
if err := marshalFn(&buf, pb); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return buf.Bytes(), nil
|
||||||
|
}
|
||||||
|
return j.marshalNonProtoField(v)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Cribbed verbatim from grpc-gateway.
|
||||||
|
type protoEnum interface {
|
||||||
|
fmt.Stringer
|
||||||
|
EnumDescriptor() ([]byte, []int)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Cribbed verbatim from grpc-gateway.
|
||||||
|
func (j *JSONPb) marshalNonProtoField(v interface{}) ([]byte, error) {
|
||||||
|
rv := reflect.ValueOf(v)
|
||||||
|
for rv.Kind() == reflect.Ptr {
|
||||||
|
if rv.IsNil() {
|
||||||
|
return []byte("null"), nil
|
||||||
|
}
|
||||||
|
rv = rv.Elem()
|
||||||
|
}
|
||||||
|
|
||||||
|
if rv.Kind() == reflect.Map {
|
||||||
|
m := make(map[string]*json.RawMessage)
|
||||||
|
for _, k := range rv.MapKeys() {
|
||||||
|
buf, err := j.marshal(rv.MapIndex(k).Interface())
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
m[fmt.Sprintf("%v", k.Interface())] = (*json.RawMessage)(&buf)
|
||||||
|
}
|
||||||
|
if j.Indent != "" {
|
||||||
|
return json.MarshalIndent(m, "", j.Indent)
|
||||||
|
}
|
||||||
|
return json.Marshal(m)
|
||||||
|
}
|
||||||
|
if enum, ok := rv.Interface().(protoEnum); ok && !j.EnumsAsInts {
|
||||||
|
return json.Marshal(enum.String())
|
||||||
|
}
|
||||||
|
return json.Marshal(rv.Interface())
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unmarshal implements gwruntime.Marshaler.
|
||||||
|
func (j *JSONPb) Unmarshal(data []byte, v interface{}) error {
|
||||||
|
if pb, ok := v.(proto.Message); ok {
|
||||||
|
return jsonpb.Unmarshal(bytes.NewReader(data), pb)
|
||||||
|
}
|
||||||
|
return errors.Errorf("unexpected type %T does not implement %s", v, typeProtoMessage)
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewDecoder implements gwruntime.Marshaler.
|
||||||
|
func (j *JSONPb) NewDecoder(r io.Reader) gwruntime.Decoder {
|
||||||
|
return gwruntime.DecoderFunc(func(v interface{}) error {
|
||||||
|
if pb, ok := v.(proto.Message); ok {
|
||||||
|
return jsonpb.Unmarshal(r, pb)
|
||||||
|
}
|
||||||
|
return errors.Errorf("unexpected type %T does not implement %s", v, typeProtoMessage)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewEncoder implements gwruntime.Marshaler.
|
||||||
|
func (j *JSONPb) NewEncoder(w io.Writer) gwruntime.Encoder {
|
||||||
|
return gwruntime.EncoderFunc(func(v interface{}) error {
|
||||||
|
if pb, ok := v.(proto.Message); ok {
|
||||||
|
marshalFn := (*jsonpb.Marshaler)(j).Marshal
|
||||||
|
return marshalFn(w, pb)
|
||||||
|
}
|
||||||
|
return errors.Errorf("unexpected type %T does not implement %s", v, typeProtoMessage)
|
||||||
|
})
|
||||||
|
}
|
31
vendor/github.com/cockroachdb/cockroach/pkg/util/protoutil/marshal.go
generated
vendored
Normal file
31
vendor/github.com/cockroachdb/cockroach/pkg/util/protoutil/marshal.go
generated
vendored
Normal file
|
@ -0,0 +1,31 @@
|
||||||
|
// Copyright 2016 The Cockroach Authors.
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||||
|
// implied. See the License for the specific language governing
|
||||||
|
// permissions and limitations under the License.
|
||||||
|
//
|
||||||
|
// Author: Tamir Duberstein (tamird@gmail.com)
|
||||||
|
|
||||||
|
package protoutil
|
||||||
|
|
||||||
|
import "github.com/gogo/protobuf/proto"
|
||||||
|
|
||||||
|
// Interceptor will be called with every proto before it is marshalled.
|
||||||
|
// Interceptor is not safe to modify concurrently with calls to Marshal.
|
||||||
|
var Interceptor = func(_ proto.Message) {}
|
||||||
|
|
||||||
|
// Marshal uses proto.Marshal to encode pb into the wire format. It is used in
|
||||||
|
// some tests to intercept calls to proto.Marshal.
|
||||||
|
func Marshal(pb proto.Message) ([]byte, error) {
|
||||||
|
Interceptor(pb)
|
||||||
|
|
||||||
|
return proto.Marshal(pb)
|
||||||
|
}
|
96
vendor/github.com/cockroachdb/cockroach/pkg/util/protoutil/marshaler.go
generated
vendored
Normal file
96
vendor/github.com/cockroachdb/cockroach/pkg/util/protoutil/marshaler.go
generated
vendored
Normal file
|
@ -0,0 +1,96 @@
|
||||||
|
// Copyright 2016 The Cockroach Authors.
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||||
|
// implied. See the License for the specific language governing
|
||||||
|
// permissions and limitations under the License.
|
||||||
|
//
|
||||||
|
// Author: Tamir Duberstein (tamird@gmail.com)
|
||||||
|
|
||||||
|
package protoutil
|
||||||
|
|
||||||
|
import (
|
||||||
|
"io"
|
||||||
|
"io/ioutil"
|
||||||
|
|
||||||
|
"github.com/gogo/protobuf/proto"
|
||||||
|
gwruntime "github.com/grpc-ecosystem/grpc-gateway/runtime"
|
||||||
|
"github.com/pkg/errors"
|
||||||
|
|
||||||
|
"github.com/cockroachdb/cockroach/pkg/util/httputil"
|
||||||
|
)
|
||||||
|
|
||||||
|
var _ gwruntime.Marshaler = (*ProtoPb)(nil)
|
||||||
|
|
||||||
|
// ProtoPb is a gwruntime.Marshaler that uses github.com/gogo/protobuf/proto.
|
||||||
|
type ProtoPb struct{}
|
||||||
|
|
||||||
|
// ContentType implements gwruntime.Marshaler.
|
||||||
|
func (*ProtoPb) ContentType() string {
|
||||||
|
return httputil.ProtoContentType
|
||||||
|
}
|
||||||
|
|
||||||
|
// Marshal implements gwruntime.Marshaler.
|
||||||
|
func (*ProtoPb) Marshal(v interface{}) ([]byte, error) {
|
||||||
|
if p, ok := v.(proto.Message); ok {
|
||||||
|
return Marshal(p)
|
||||||
|
}
|
||||||
|
return nil, errors.Errorf("unexpected type %T does not implement %s", v, typeProtoMessage)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unmarshal implements gwruntime.Marshaler.
|
||||||
|
func (*ProtoPb) Unmarshal(data []byte, v interface{}) error {
|
||||||
|
if p, ok := v.(proto.Message); ok {
|
||||||
|
return proto.Unmarshal(data, p)
|
||||||
|
}
|
||||||
|
return errors.Errorf("unexpected type %T does not implement %s", v, typeProtoMessage)
|
||||||
|
}
|
||||||
|
|
||||||
|
type protoDecoder struct {
|
||||||
|
r io.Reader
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewDecoder implements gwruntime.Marshaler.
|
||||||
|
func (*ProtoPb) NewDecoder(r io.Reader) gwruntime.Decoder {
|
||||||
|
return &protoDecoder{r: r}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Decode implements gwruntime.Marshaler.
|
||||||
|
func (d *protoDecoder) Decode(v interface{}) error {
|
||||||
|
if p, ok := v.(proto.Message); ok {
|
||||||
|
bytes, err := ioutil.ReadAll(d.r)
|
||||||
|
if err == nil {
|
||||||
|
err = proto.Unmarshal(bytes, p)
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return errors.Errorf("unexpected type %T does not implement %s", v, typeProtoMessage)
|
||||||
|
}
|
||||||
|
|
||||||
|
type protoEncoder struct {
|
||||||
|
w io.Writer
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewEncoder implements gwruntime.Marshaler.
|
||||||
|
func (*ProtoPb) NewEncoder(w io.Writer) gwruntime.Encoder {
|
||||||
|
return &protoEncoder{w: w}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Encode implements gwruntime.Marshaler.
|
||||||
|
func (e *protoEncoder) Encode(v interface{}) error {
|
||||||
|
if p, ok := v.(proto.Message); ok {
|
||||||
|
bytes, err := Marshal(p)
|
||||||
|
if err == nil {
|
||||||
|
_, err = e.w.Write(bytes)
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return errors.Errorf("unexpected type %T does not implement %s", v, typeProtoMessage)
|
||||||
|
}
|
47
vendor/github.com/cockroachdb/cockroach/pkg/util/syncutil/mutex_deadlock.go
generated
vendored
Normal file
47
vendor/github.com/cockroachdb/cockroach/pkg/util/syncutil/mutex_deadlock.go
generated
vendored
Normal file
|
@ -0,0 +1,47 @@
|
||||||
|
// Copyright 2016 The Cockroach Authors.
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||||
|
// implied. See the License for the specific language governing
|
||||||
|
// permissions and limitations under the License.
|
||||||
|
//
|
||||||
|
// Author: Tamir Duberstein (tamird@gmail.com)
|
||||||
|
|
||||||
|
// +build deadlock
|
||||||
|
|
||||||
|
package syncutil
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
|
||||||
|
deadlock "github.com/sasha-s/go-deadlock"
|
||||||
|
)
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
deadlock.Opts.DeadlockTimeout = 5 * time.Minute
|
||||||
|
}
|
||||||
|
|
||||||
|
// A Mutex is a mutual exclusion lock.
|
||||||
|
type Mutex struct {
|
||||||
|
deadlock.Mutex
|
||||||
|
}
|
||||||
|
|
||||||
|
// AssertHeld is a no-op for deadlock mutexes.
|
||||||
|
func (m *Mutex) AssertHeld() {
|
||||||
|
}
|
||||||
|
|
||||||
|
// An RWMutex is a reader/writer mutual exclusion lock.
|
||||||
|
type RWMutex struct {
|
||||||
|
deadlock.RWMutex
|
||||||
|
}
|
||||||
|
|
||||||
|
// AssertHeld is a no-op for deadlock mutexes.
|
||||||
|
func (m *RWMutex) AssertHeld() {
|
||||||
|
}
|
92
vendor/github.com/cockroachdb/cockroach/pkg/util/syncutil/mutex_sync.go
generated
vendored
Normal file
92
vendor/github.com/cockroachdb/cockroach/pkg/util/syncutil/mutex_sync.go
generated
vendored
Normal file
|
@ -0,0 +1,92 @@
|
||||||
|
// Copyright 2016 The Cockroach Authors.
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||||
|
// implied. See the License for the specific language governing
|
||||||
|
// permissions and limitations under the License.
|
||||||
|
//
|
||||||
|
// Author: Tamir Duberstein (tamird@gmail.com)
|
||||||
|
|
||||||
|
// +build !deadlock
|
||||||
|
|
||||||
|
package syncutil
|
||||||
|
|
||||||
|
import (
|
||||||
|
"sync"
|
||||||
|
"sync/atomic"
|
||||||
|
)
|
||||||
|
|
||||||
|
// A Mutex is a mutual exclusion lock.
|
||||||
|
type Mutex struct {
|
||||||
|
mu sync.Mutex
|
||||||
|
isLocked int32 // updated atomically
|
||||||
|
}
|
||||||
|
|
||||||
|
// Lock implements sync.Locker.
|
||||||
|
func (m *Mutex) Lock() {
|
||||||
|
m.mu.Lock()
|
||||||
|
atomic.StoreInt32(&m.isLocked, 1)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unlock implements sync.Locker.
|
||||||
|
func (m *Mutex) Unlock() {
|
||||||
|
atomic.StoreInt32(&m.isLocked, 0)
|
||||||
|
m.mu.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
// AssertHeld may panic if the mutex is not locked (but it is not required to
|
||||||
|
// do so). Functions which require that their callers hold a particular lock
|
||||||
|
// may use this to enforce this requirement more directly than relying on the
|
||||||
|
// race detector.
|
||||||
|
//
|
||||||
|
// Note that we do not require the lock to be held by any particular thread,
|
||||||
|
// just that some thread holds the lock. This is both more efficient and allows
|
||||||
|
// for rare cases where a mutex is locked in one thread and used in another.
|
||||||
|
func (m *Mutex) AssertHeld() {
|
||||||
|
if atomic.LoadInt32(&m.isLocked) == 0 {
|
||||||
|
panic("mutex is not locked")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TODO(pmattis): Mutex.AssertHeld is neither used or tested. Silence unused
|
||||||
|
// warning.
|
||||||
|
var _ = (*Mutex).AssertHeld
|
||||||
|
|
||||||
|
// An RWMutex is a reader/writer mutual exclusion lock.
|
||||||
|
type RWMutex struct {
|
||||||
|
sync.RWMutex
|
||||||
|
isLocked int32 // updated atomically
|
||||||
|
}
|
||||||
|
|
||||||
|
// Lock implements sync.Locker.
|
||||||
|
func (m *RWMutex) Lock() {
|
||||||
|
m.RWMutex.Lock()
|
||||||
|
atomic.StoreInt32(&m.isLocked, 1)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unlock implements sync.Locker.
|
||||||
|
func (m *RWMutex) Unlock() {
|
||||||
|
atomic.StoreInt32(&m.isLocked, 0)
|
||||||
|
m.RWMutex.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
// AssertHeld may panic if the mutex is not locked for writing (but it is not
|
||||||
|
// required to do so). Functions which require that their callers hold a
|
||||||
|
// particular lock may use this to enforce this requirement more directly than
|
||||||
|
// relying on the race detector.
|
||||||
|
//
|
||||||
|
// Note that we do not require the lock to be held by any particular thread,
|
||||||
|
// just that some thread holds the lock. This is both more efficient and allows
|
||||||
|
// for rare cases where a mutex is locked in one thread and used in another.
|
||||||
|
func (m *RWMutex) AssertHeld() {
|
||||||
|
if atomic.LoadInt32(&m.isLocked) == 0 {
|
||||||
|
panic("mutex is not locked")
|
||||||
|
}
|
||||||
|
}
|
979
vendor/github.com/gogo/protobuf/jsonpb/jsonpb.go
generated
vendored
Normal file
979
vendor/github.com/gogo/protobuf/jsonpb/jsonpb.go
generated
vendored
Normal file
|
@ -0,0 +1,979 @@
|
||||||
|
// Go support for Protocol Buffers - Google's data interchange format
|
||||||
|
//
|
||||||
|
// Copyright 2015 The Go Authors. All rights reserved.
|
||||||
|
// https://github.com/golang/protobuf
|
||||||
|
//
|
||||||
|
// Redistribution and use in source and binary forms, with or without
|
||||||
|
// modification, are permitted provided that the following conditions are
|
||||||
|
// met:
|
||||||
|
//
|
||||||
|
// * Redistributions of source code must retain the above copyright
|
||||||
|
// notice, this list of conditions and the following disclaimer.
|
||||||
|
// * Redistributions in binary form must reproduce the above
|
||||||
|
// copyright notice, this list of conditions and the following disclaimer
|
||||||
|
// in the documentation and/or other materials provided with the
|
||||||
|
// distribution.
|
||||||
|
// * Neither the name of Google Inc. nor the names of its
|
||||||
|
// contributors may be used to endorse or promote products derived from
|
||||||
|
// this software without specific prior written permission.
|
||||||
|
//
|
||||||
|
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
|
||||||
|
/*
|
||||||
|
Package jsonpb provides marshaling and unmarshaling between protocol buffers and JSON.
|
||||||
|
It follows the specification at https://developers.google.com/protocol-buffers/docs/proto3#json.
|
||||||
|
|
||||||
|
This package produces a different output than the standard "encoding/json" package,
|
||||||
|
which does not operate correctly on protocol buffers.
|
||||||
|
*/
|
||||||
|
package jsonpb
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/json"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"reflect"
|
||||||
|
"sort"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/gogo/protobuf/proto"
|
||||||
|
"github.com/gogo/protobuf/types"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Marshaler is a configurable object for converting between
|
||||||
|
// protocol buffer objects and a JSON representation for them.
|
||||||
|
type Marshaler struct {
|
||||||
|
// Whether to render enum values as integers, as opposed to string values.
|
||||||
|
EnumsAsInts bool
|
||||||
|
|
||||||
|
// Whether to render fields with zero values.
|
||||||
|
EmitDefaults bool
|
||||||
|
|
||||||
|
// A string to indent each level by. The presence of this field will
|
||||||
|
// also cause a space to appear between the field separator and
|
||||||
|
// value, and for newlines to be appear between fields and array
|
||||||
|
// elements.
|
||||||
|
Indent string
|
||||||
|
|
||||||
|
// Whether to use the original (.proto) name for fields.
|
||||||
|
OrigName bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// Marshal marshals a protocol buffer into JSON.
|
||||||
|
func (m *Marshaler) Marshal(out io.Writer, pb proto.Message) error {
|
||||||
|
writer := &errWriter{writer: out}
|
||||||
|
return m.marshalObject(writer, pb, "", "")
|
||||||
|
}
|
||||||
|
|
||||||
|
// MarshalToString converts a protocol buffer object to JSON string.
|
||||||
|
func (m *Marshaler) MarshalToString(pb proto.Message) (string, error) {
|
||||||
|
var buf bytes.Buffer
|
||||||
|
if err := m.Marshal(&buf, pb); err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
return buf.String(), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type int32Slice []int32
|
||||||
|
|
||||||
|
// For sorting extensions ids to ensure stable output.
|
||||||
|
func (s int32Slice) Len() int { return len(s) }
|
||||||
|
func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] }
|
||||||
|
func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
|
||||||
|
|
||||||
|
type isWkt interface {
|
||||||
|
XXX_WellKnownType() string
|
||||||
|
}
|
||||||
|
|
||||||
|
// marshalObject writes a struct to the Writer.
|
||||||
|
func (m *Marshaler) marshalObject(out *errWriter, v proto.Message, indent, typeURL string) error {
|
||||||
|
s := reflect.ValueOf(v).Elem()
|
||||||
|
|
||||||
|
// Handle well-known types.
|
||||||
|
if wkt, ok := v.(isWkt); ok {
|
||||||
|
switch wkt.XXX_WellKnownType() {
|
||||||
|
case "DoubleValue", "FloatValue", "Int64Value", "UInt64Value",
|
||||||
|
"Int32Value", "UInt32Value", "BoolValue", "StringValue", "BytesValue":
|
||||||
|
// "Wrappers use the same representation in JSON
|
||||||
|
// as the wrapped primitive type, ..."
|
||||||
|
sprop := proto.GetProperties(s.Type())
|
||||||
|
return m.marshalValue(out, sprop.Prop[0], s.Field(0), indent)
|
||||||
|
case "Any":
|
||||||
|
// Any is a bit more involved.
|
||||||
|
return m.marshalAny(out, v, indent)
|
||||||
|
case "Duration":
|
||||||
|
// "Generated output always contains 3, 6, or 9 fractional digits,
|
||||||
|
// depending on required precision."
|
||||||
|
s, ns := s.Field(0).Int(), s.Field(1).Int()
|
||||||
|
d := time.Duration(s)*time.Second + time.Duration(ns)*time.Nanosecond
|
||||||
|
x := fmt.Sprintf("%.9f", d.Seconds())
|
||||||
|
x = strings.TrimSuffix(x, "000")
|
||||||
|
x = strings.TrimSuffix(x, "000")
|
||||||
|
out.write(`"`)
|
||||||
|
out.write(x)
|
||||||
|
out.write(`s"`)
|
||||||
|
return out.err
|
||||||
|
case "Struct":
|
||||||
|
// Let marshalValue handle the `fields` map.
|
||||||
|
// TODO: pass the correct Properties if needed.
|
||||||
|
return m.marshalValue(out, &proto.Properties{}, s.Field(0), indent)
|
||||||
|
case "Timestamp":
|
||||||
|
// "RFC 3339, where generated output will always be Z-normalized
|
||||||
|
// and uses 3, 6 or 9 fractional digits."
|
||||||
|
s, ns := s.Field(0).Int(), s.Field(1).Int()
|
||||||
|
t := time.Unix(s, ns).UTC()
|
||||||
|
// time.RFC3339Nano isn't exactly right (we need to get 3/6/9 fractional digits).
|
||||||
|
x := t.Format("2006-01-02T15:04:05.000000000")
|
||||||
|
x = strings.TrimSuffix(x, "000")
|
||||||
|
x = strings.TrimSuffix(x, "000")
|
||||||
|
out.write(`"`)
|
||||||
|
out.write(x)
|
||||||
|
out.write(`Z"`)
|
||||||
|
return out.err
|
||||||
|
case "Value":
|
||||||
|
// Value has a single oneof.
|
||||||
|
kind := s.Field(0)
|
||||||
|
if kind.IsNil() {
|
||||||
|
// "absence of any variant indicates an error"
|
||||||
|
return errors.New("nil Value")
|
||||||
|
}
|
||||||
|
// oneof -> *T -> T -> T.F
|
||||||
|
x := kind.Elem().Elem().Field(0)
|
||||||
|
// TODO: pass the correct Properties if needed.
|
||||||
|
return m.marshalValue(out, &proto.Properties{}, x, indent)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
out.write("{")
|
||||||
|
if m.Indent != "" {
|
||||||
|
out.write("\n")
|
||||||
|
}
|
||||||
|
|
||||||
|
firstField := true
|
||||||
|
|
||||||
|
if typeURL != "" {
|
||||||
|
if err := m.marshalTypeURL(out, indent, typeURL); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
firstField = false
|
||||||
|
}
|
||||||
|
|
||||||
|
for i := 0; i < s.NumField(); i++ {
|
||||||
|
value := s.Field(i)
|
||||||
|
valueField := s.Type().Field(i)
|
||||||
|
if strings.HasPrefix(valueField.Name, "XXX_") {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsNil will panic on most value kinds.
|
||||||
|
switch value.Kind() {
|
||||||
|
case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
|
||||||
|
if value.IsNil() {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if !m.EmitDefaults {
|
||||||
|
switch value.Kind() {
|
||||||
|
case reflect.Bool:
|
||||||
|
if !value.Bool() {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
case reflect.Int32, reflect.Int64:
|
||||||
|
if value.Int() == 0 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
case reflect.Uint32, reflect.Uint64:
|
||||||
|
if value.Uint() == 0 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
case reflect.Float32, reflect.Float64:
|
||||||
|
if value.Float() == 0 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
case reflect.String:
|
||||||
|
if value.Len() == 0 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Oneof fields need special handling.
|
||||||
|
if valueField.Tag.Get("protobuf_oneof") != "" {
|
||||||
|
// value is an interface containing &T{real_value}.
|
||||||
|
sv := value.Elem().Elem() // interface -> *T -> T
|
||||||
|
value = sv.Field(0)
|
||||||
|
valueField = sv.Type().Field(0)
|
||||||
|
}
|
||||||
|
prop := jsonProperties(valueField, m.OrigName)
|
||||||
|
if !firstField {
|
||||||
|
m.writeSep(out)
|
||||||
|
}
|
||||||
|
// If the map value is a cast type, it may not implement proto.Message, therefore
|
||||||
|
// allow the struct tag to declare the underlying message type. Instead of changing
|
||||||
|
// the signatures of the child types (and because prop.mvalue is not public), use
|
||||||
|
// CustomType as a passer.
|
||||||
|
if value.Kind() == reflect.Map {
|
||||||
|
if tag := valueField.Tag.Get("protobuf"); tag != "" {
|
||||||
|
for _, v := range strings.Split(tag, ",") {
|
||||||
|
if !strings.HasPrefix(v, "castvaluetype=") {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
v = strings.TrimPrefix(v, "castvaluetype=")
|
||||||
|
prop.CustomType = v
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if err := m.marshalField(out, prop, value, indent); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
firstField = false
|
||||||
|
}
|
||||||
|
|
||||||
|
// Handle proto2 extensions.
|
||||||
|
if ep, ok := v.(proto.Message); ok {
|
||||||
|
extensions := proto.RegisteredExtensions(v)
|
||||||
|
// Sort extensions for stable output.
|
||||||
|
ids := make([]int32, 0, len(extensions))
|
||||||
|
for id, desc := range extensions {
|
||||||
|
if !proto.HasExtension(ep, desc) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
ids = append(ids, id)
|
||||||
|
}
|
||||||
|
sort.Sort(int32Slice(ids))
|
||||||
|
for _, id := range ids {
|
||||||
|
desc := extensions[id]
|
||||||
|
if desc == nil {
|
||||||
|
// unknown extension
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
ext, extErr := proto.GetExtension(ep, desc)
|
||||||
|
if extErr != nil {
|
||||||
|
return extErr
|
||||||
|
}
|
||||||
|
value := reflect.ValueOf(ext)
|
||||||
|
var prop proto.Properties
|
||||||
|
prop.Parse(desc.Tag)
|
||||||
|
prop.JSONName = fmt.Sprintf("[%s]", desc.Name)
|
||||||
|
if !firstField {
|
||||||
|
m.writeSep(out)
|
||||||
|
}
|
||||||
|
if err := m.marshalField(out, &prop, value, indent); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
firstField = false
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
if m.Indent != "" {
|
||||||
|
out.write("\n")
|
||||||
|
out.write(indent)
|
||||||
|
}
|
||||||
|
out.write("}")
|
||||||
|
return out.err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Marshaler) writeSep(out *errWriter) {
|
||||||
|
if m.Indent != "" {
|
||||||
|
out.write(",\n")
|
||||||
|
} else {
|
||||||
|
out.write(",")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Marshaler) marshalAny(out *errWriter, any proto.Message, indent string) error {
|
||||||
|
// "If the Any contains a value that has a special JSON mapping,
|
||||||
|
// it will be converted as follows: {"@type": xxx, "value": yyy}.
|
||||||
|
// Otherwise, the value will be converted into a JSON object,
|
||||||
|
// and the "@type" field will be inserted to indicate the actual data type."
|
||||||
|
v := reflect.ValueOf(any).Elem()
|
||||||
|
turl := v.Field(0).String()
|
||||||
|
val := v.Field(1).Bytes()
|
||||||
|
|
||||||
|
// Only the part of type_url after the last slash is relevant.
|
||||||
|
mname := turl
|
||||||
|
if slash := strings.LastIndex(mname, "/"); slash >= 0 {
|
||||||
|
mname = mname[slash+1:]
|
||||||
|
}
|
||||||
|
mt := proto.MessageType(mname)
|
||||||
|
if mt == nil {
|
||||||
|
return fmt.Errorf("unknown message type %q", mname)
|
||||||
|
}
|
||||||
|
msg := reflect.New(mt.Elem()).Interface().(proto.Message)
|
||||||
|
if err := proto.Unmarshal(val, msg); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, ok := msg.(isWkt); ok {
|
||||||
|
out.write("{")
|
||||||
|
if m.Indent != "" {
|
||||||
|
out.write("\n")
|
||||||
|
}
|
||||||
|
if err := m.marshalTypeURL(out, indent, turl); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
m.writeSep(out)
|
||||||
|
if m.Indent != "" {
|
||||||
|
out.write(indent)
|
||||||
|
out.write(m.Indent)
|
||||||
|
out.write(`"value": `)
|
||||||
|
} else {
|
||||||
|
out.write(`"value":`)
|
||||||
|
}
|
||||||
|
if err := m.marshalObject(out, msg, indent+m.Indent, ""); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if m.Indent != "" {
|
||||||
|
out.write("\n")
|
||||||
|
out.write(indent)
|
||||||
|
}
|
||||||
|
out.write("}")
|
||||||
|
return out.err
|
||||||
|
}
|
||||||
|
|
||||||
|
return m.marshalObject(out, msg, indent, turl)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Marshaler) marshalTypeURL(out *errWriter, indent, typeURL string) error {
|
||||||
|
if m.Indent != "" {
|
||||||
|
out.write(indent)
|
||||||
|
out.write(m.Indent)
|
||||||
|
}
|
||||||
|
out.write(`"@type":`)
|
||||||
|
if m.Indent != "" {
|
||||||
|
out.write(" ")
|
||||||
|
}
|
||||||
|
b, err := json.Marshal(typeURL)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
out.write(string(b))
|
||||||
|
return out.err
|
||||||
|
}
|
||||||
|
|
||||||
|
// marshalField writes field description and value to the Writer.
|
||||||
|
func (m *Marshaler) marshalField(out *errWriter, prop *proto.Properties, v reflect.Value, indent string) error {
|
||||||
|
if m.Indent != "" {
|
||||||
|
out.write(indent)
|
||||||
|
out.write(m.Indent)
|
||||||
|
}
|
||||||
|
out.write(`"`)
|
||||||
|
out.write(prop.JSONName)
|
||||||
|
out.write(`":`)
|
||||||
|
if m.Indent != "" {
|
||||||
|
out.write(" ")
|
||||||
|
}
|
||||||
|
if err := m.marshalValue(out, prop, v, indent); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// marshalValue writes the value to the Writer.
|
||||||
|
func (m *Marshaler) marshalValue(out *errWriter, prop *proto.Properties, v reflect.Value, indent string) error {
|
||||||
|
|
||||||
|
v = reflect.Indirect(v)
|
||||||
|
|
||||||
|
// Handle repeated elements.
|
||||||
|
if v.Kind() == reflect.Slice && v.Type().Elem().Kind() != reflect.Uint8 {
|
||||||
|
out.write("[")
|
||||||
|
comma := ""
|
||||||
|
for i := 0; i < v.Len(); i++ {
|
||||||
|
sliceVal := v.Index(i)
|
||||||
|
out.write(comma)
|
||||||
|
if m.Indent != "" {
|
||||||
|
out.write("\n")
|
||||||
|
out.write(indent)
|
||||||
|
out.write(m.Indent)
|
||||||
|
out.write(m.Indent)
|
||||||
|
}
|
||||||
|
if err := m.marshalValue(out, prop, sliceVal, indent+m.Indent); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
comma = ","
|
||||||
|
}
|
||||||
|
if m.Indent != "" {
|
||||||
|
out.write("\n")
|
||||||
|
out.write(indent)
|
||||||
|
out.write(m.Indent)
|
||||||
|
}
|
||||||
|
out.write("]")
|
||||||
|
return out.err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Handle well-known types.
|
||||||
|
// Most are handled up in marshalObject (because 99% are messages).
|
||||||
|
if wkt, ok := v.Interface().(isWkt); ok {
|
||||||
|
switch wkt.XXX_WellKnownType() {
|
||||||
|
case "NullValue":
|
||||||
|
out.write("null")
|
||||||
|
return out.err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if t, ok := v.Interface().(time.Time); ok {
|
||||||
|
ts, err := types.TimestampProto(t)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return m.marshalValue(out, prop, reflect.ValueOf(ts), indent)
|
||||||
|
}
|
||||||
|
|
||||||
|
if d, ok := v.Interface().(time.Duration); ok {
|
||||||
|
dur := types.DurationProto(d)
|
||||||
|
return m.marshalValue(out, prop, reflect.ValueOf(dur), indent)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Handle enumerations.
|
||||||
|
if !m.EnumsAsInts && prop.Enum != "" {
|
||||||
|
// Unknown enum values will are stringified by the proto library as their
|
||||||
|
// value. Such values should _not_ be quoted or they will be interpreted
|
||||||
|
// as an enum string instead of their value.
|
||||||
|
enumStr := v.Interface().(fmt.Stringer).String()
|
||||||
|
var valStr string
|
||||||
|
if v.Kind() == reflect.Ptr {
|
||||||
|
valStr = strconv.Itoa(int(v.Elem().Int()))
|
||||||
|
} else {
|
||||||
|
valStr = strconv.Itoa(int(v.Int()))
|
||||||
|
}
|
||||||
|
|
||||||
|
if m, ok := v.Interface().(interface {
|
||||||
|
MarshalJSON() ([]byte, error)
|
||||||
|
}); ok {
|
||||||
|
data, err := m.MarshalJSON()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
enumStr = string(data)
|
||||||
|
enumStr, err = strconv.Unquote(enumStr)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
isKnownEnum := enumStr != valStr
|
||||||
|
|
||||||
|
if isKnownEnum {
|
||||||
|
out.write(`"`)
|
||||||
|
}
|
||||||
|
out.write(enumStr)
|
||||||
|
if isKnownEnum {
|
||||||
|
out.write(`"`)
|
||||||
|
}
|
||||||
|
return out.err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Handle nested messages.
|
||||||
|
if v.Kind() == reflect.Struct {
|
||||||
|
i := v
|
||||||
|
if v.CanAddr() {
|
||||||
|
i = v.Addr()
|
||||||
|
} else {
|
||||||
|
i = reflect.New(v.Type())
|
||||||
|
i.Elem().Set(v)
|
||||||
|
}
|
||||||
|
iface := i.Interface()
|
||||||
|
if iface == nil {
|
||||||
|
out.write(`null`)
|
||||||
|
return out.err
|
||||||
|
}
|
||||||
|
|
||||||
|
if m, ok := v.Interface().(interface {
|
||||||
|
MarshalJSON() ([]byte, error)
|
||||||
|
}); ok {
|
||||||
|
data, err := m.MarshalJSON()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
out.write(string(data))
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
pm, ok := iface.(proto.Message)
|
||||||
|
if !ok {
|
||||||
|
if prop.CustomType == "" {
|
||||||
|
return fmt.Errorf("%v does not implement proto.Message", v.Type())
|
||||||
|
}
|
||||||
|
t := proto.MessageType(prop.CustomType)
|
||||||
|
if t == nil || !i.Type().ConvertibleTo(t) {
|
||||||
|
return fmt.Errorf("%v declared custom type %s but it is not convertible to %v", v.Type(), prop.CustomType, t)
|
||||||
|
}
|
||||||
|
pm = i.Convert(t).Interface().(proto.Message)
|
||||||
|
}
|
||||||
|
return m.marshalObject(out, pm, indent+m.Indent, "")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Handle maps.
|
||||||
|
// Since Go randomizes map iteration, we sort keys for stable output.
|
||||||
|
if v.Kind() == reflect.Map {
|
||||||
|
out.write(`{`)
|
||||||
|
keys := v.MapKeys()
|
||||||
|
sort.Sort(mapKeys(keys))
|
||||||
|
for i, k := range keys {
|
||||||
|
if i > 0 {
|
||||||
|
out.write(`,`)
|
||||||
|
}
|
||||||
|
if m.Indent != "" {
|
||||||
|
out.write("\n")
|
||||||
|
out.write(indent)
|
||||||
|
out.write(m.Indent)
|
||||||
|
out.write(m.Indent)
|
||||||
|
}
|
||||||
|
|
||||||
|
b, err := json.Marshal(k.Interface())
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
s := string(b)
|
||||||
|
|
||||||
|
// If the JSON is not a string value, encode it again to make it one.
|
||||||
|
if !strings.HasPrefix(s, `"`) {
|
||||||
|
b, err := json.Marshal(s)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
s = string(b)
|
||||||
|
}
|
||||||
|
|
||||||
|
out.write(s)
|
||||||
|
out.write(`:`)
|
||||||
|
if m.Indent != "" {
|
||||||
|
out.write(` `)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := m.marshalValue(out, prop, v.MapIndex(k), indent+m.Indent); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if m.Indent != "" {
|
||||||
|
out.write("\n")
|
||||||
|
out.write(indent)
|
||||||
|
out.write(m.Indent)
|
||||||
|
}
|
||||||
|
out.write(`}`)
|
||||||
|
return out.err
|
||||||
|
}

	// Default handling defers to the encoding/json library.
	b, err := json.Marshal(v.Interface())
	if err != nil {
		return err
	}
	needToQuote := string(b[0]) != `"` && (v.Kind() == reflect.Int64 || v.Kind() == reflect.Uint64)
	if needToQuote {
		out.write(`"`)
	}
	out.write(string(b))
	if needToQuote {
		out.write(`"`)
	}
	return out.err
}
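
// Why the quoting above matters: the proto3 JSON mapping emits 64-bit
// integers as strings, since integers above 2^53 lose precision in
// JavaScript-style double arithmetic. A rough sketch (illustrative only):
//
//	v := reflect.ValueOf(int64(9007199254740993)) // 2^53 + 1
//	b, _ := json.Marshal(v.Interface())           // 9007199254740993
//	if v.Kind() == reflect.Int64 && b[0] != '"' {
//		b = append([]byte(`"`), append(b, '"')...) // "9007199254740993"
//	}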

// Unmarshaler is a configurable object for converting from a JSON
// representation to a protocol buffer object.
type Unmarshaler struct {
	// Whether to allow messages to contain unknown fields, as opposed to
	// failing to unmarshal.
	AllowUnknownFields bool
}

// UnmarshalNext unmarshals the next protocol buffer from a JSON object stream.
// This function is lenient and will decode any options permutations of the
// related Marshaler.
func (u *Unmarshaler) UnmarshalNext(dec *json.Decoder, pb proto.Message) error {
	inputValue := json.RawMessage{}
	if err := dec.Decode(&inputValue); err != nil {
		return err
	}
	return u.unmarshalValue(reflect.ValueOf(pb).Elem(), inputValue, nil)
}

// Unmarshal unmarshals a JSON object stream into a protocol
// buffer. This function is lenient and will decode any options
// permutations of the related Marshaler.
func (u *Unmarshaler) Unmarshal(r io.Reader, pb proto.Message) error {
	dec := json.NewDecoder(r)
	return u.UnmarshalNext(dec, pb)
}

// UnmarshalNext unmarshals the next protocol buffer from a JSON object stream.
// This function is lenient and will decode any options permutations of the
// related Marshaler.
func UnmarshalNext(dec *json.Decoder, pb proto.Message) error {
	return new(Unmarshaler).UnmarshalNext(dec, pb)
}

// Unmarshal unmarshals a JSON object stream into a protocol
// buffer. This function is lenient and will decode any options
// permutations of the related Marshaler.
func Unmarshal(r io.Reader, pb proto.Message) error {
	return new(Unmarshaler).Unmarshal(r, pb)
}

// UnmarshalString will populate the fields of a protocol buffer based
// on a JSON string. This function is lenient and will decode any options
// permutations of the related Marshaler.
func UnmarshalString(str string, pb proto.Message) error {
	return new(Unmarshaler).Unmarshal(strings.NewReader(str), pb)
}
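
// Example use of the Unmarshaler above with a hypothetical generated
// message type pb.Foo (the type and field names are assumptions, not part
// of this package):
//
//	msg := &pb.Foo{}
//	u := &Unmarshaler{AllowUnknownFields: true}
//	err := u.Unmarshal(strings.NewReader(`{"name":"x","ignored":1}`), msg)
//	// With AllowUnknownFields set, the unrecognised "ignored" key is
//	// dropped instead of producing an "unknown field" error.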

// unmarshalValue converts/copies a value into the target.
// prop may be nil.
func (u *Unmarshaler) unmarshalValue(target reflect.Value, inputValue json.RawMessage, prop *proto.Properties) error {
	targetType := target.Type()

	// Allocate memory for pointer fields.
	if targetType.Kind() == reflect.Ptr {
		target.Set(reflect.New(targetType.Elem()))
		return u.unmarshalValue(target.Elem(), inputValue, prop)
	}

	// Handle well-known types.
	if wkt, ok := target.Addr().Interface().(isWkt); ok {
		switch wkt.XXX_WellKnownType() {
		case "DoubleValue", "FloatValue", "Int64Value", "UInt64Value",
			"Int32Value", "UInt32Value", "BoolValue", "StringValue", "BytesValue":
			// "Wrappers use the same representation in JSON
			// as the wrapped primitive type, except that null is allowed."
			// encoding/json will turn JSON `null` into Go `nil`,
			// so we don't have to do any extra work.
			return u.unmarshalValue(target.Field(0), inputValue, prop)
		case "Any":
			return fmt.Errorf("unmarshaling Any not supported yet")
		case "Duration":
			ivStr := string(inputValue)
			if ivStr == "null" {
				target.Field(0).SetInt(0)
				target.Field(1).SetInt(0)
				return nil
			}

			unq, err := strconv.Unquote(ivStr)
			if err != nil {
				return err
			}
			d, err := time.ParseDuration(unq)
			if err != nil {
				return fmt.Errorf("bad Duration: %v", err)
			}
			ns := d.Nanoseconds()
			s := ns / 1e9
			ns %= 1e9
			target.Field(0).SetInt(s)
			target.Field(1).SetInt(ns)
			return nil
		case "Timestamp":
			ivStr := string(inputValue)
			if ivStr == "null" {
				target.Field(0).SetInt(0)
				target.Field(1).SetInt(0)
				return nil
			}

			unq, err := strconv.Unquote(ivStr)
			if err != nil {
				return err
			}
			t, err := time.Parse(time.RFC3339Nano, unq)
			if err != nil {
				return fmt.Errorf("bad Timestamp: %v", err)
			}
			target.Field(0).SetInt(int64(t.Unix()))
			target.Field(1).SetInt(int64(t.Nanosecond()))
			return nil
		}
	}
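
	// The Duration branch above boils down to the following, sketched with
	// the standard library only (illustrative values):
	//
	//	unq, _ := strconv.Unquote(`"1.5s"`) // "1.5s"
	//	d, _ := time.ParseDuration(unq)     // 1.5s
	//	ns := d.Nanoseconds()               // 1500000000
	//	secs, nanos := ns/1e9, ns%1e9       // 1, 500000000
	//	// secs and nanos populate the Seconds and Nanos fields.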

	if t, ok := target.Addr().Interface().(*time.Time); ok {
		ts := &types.Timestamp{}
		if err := u.unmarshalValue(reflect.ValueOf(ts).Elem(), inputValue, prop); err != nil {
			return err
		}
		tt, err := types.TimestampFromProto(ts)
		if err != nil {
			return err
		}
		*t = tt
		return nil
	}

	if d, ok := target.Addr().Interface().(*time.Duration); ok {
		dur := &types.Duration{}
		if err := u.unmarshalValue(reflect.ValueOf(dur).Elem(), inputValue, prop); err != nil {
			return err
		}
		dd, err := types.DurationFromProto(dur)
		if err != nil {
			return err
		}
		*d = dd
		return nil
	}

	// Handle enums, which have an underlying type of int32,
	// and may appear as strings.
	// The case of an enum appearing as a number is handled
	// at the bottom of this function.
	if inputValue[0] == '"' && prop != nil && prop.Enum != "" {
		vmap := proto.EnumValueMap(prop.Enum)
		// Don't need to do unquoting; valid enum names
		// are from a limited character set.
		s := inputValue[1 : len(inputValue)-1]
		n, ok := vmap[string(s)]
		if !ok {
			return fmt.Errorf("unknown value %q for enum %s", s, prop.Enum)
		}
		if target.Kind() == reflect.Ptr { // proto2
			target.Set(reflect.New(targetType.Elem()))
			target = target.Elem()
		}
		target.SetInt(int64(n))
		return nil
	}
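
	// proto.EnumValueMap returns the name-to-number map the lookup above
	// relies on. Sketch with a hypothetical registered enum (the enum and
	// value names are assumptions):
	//
	//	vmap := proto.EnumValueMap("example.Status") // nil unless such an enum is linked in
	//	if n, ok := vmap["STATUS_ACTIVE"]; ok {
	//		_ = n // n is the int32 value that target.SetInt receives above
	//	}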

	// Handle nested messages.
	if targetType.Kind() == reflect.Struct {
		if target.CanAddr() {
			if m, ok := target.Addr().Interface().(interface {
				UnmarshalJSON([]byte) error
			}); ok {
				return json.Unmarshal(inputValue, m)
			}
		}

		var jsonFields map[string]json.RawMessage
		if err := json.Unmarshal(inputValue, &jsonFields); err != nil {
			return err
		}

		consumeField := func(prop *proto.Properties) (json.RawMessage, bool) {
			// Be liberal in what names we accept; both orig_name and camelName are okay.
			fieldNames := acceptedJSONFieldNames(prop)

			vOrig, okOrig := jsonFields[fieldNames.orig]
			vCamel, okCamel := jsonFields[fieldNames.camel]
			if !okOrig && !okCamel {
				return nil, false
			}
			// If, for some reason, both are present in the data, favour the camelName.
			var raw json.RawMessage
			if okOrig {
				raw = vOrig
				delete(jsonFields, fieldNames.orig)
			}
			if okCamel {
				raw = vCamel
				delete(jsonFields, fieldNames.camel)
			}
			return raw, true
		}

		sprops := proto.GetProperties(targetType)
		for i := 0; i < target.NumField(); i++ {
			ft := target.Type().Field(i)
			if strings.HasPrefix(ft.Name, "XXX_") {
				continue
			}
			valueForField, ok := consumeField(sprops.Prop[i])
			if !ok {
				continue
			}

			if err := u.unmarshalValue(target.Field(i), valueForField, sprops.Prop[i]); err != nil {
				return err
			}
		}
		// Check for any oneof fields.
		if len(jsonFields) > 0 {
			for _, oop := range sprops.OneofTypes {
				raw, ok := consumeField(oop.Prop)
				if !ok {
					continue
				}
				nv := reflect.New(oop.Type.Elem())
				target.Field(oop.Field).Set(nv)
				if err := u.unmarshalValue(nv.Elem().Field(0), raw, oop.Prop); err != nil {
					return err
				}
			}
		}
		if !u.AllowUnknownFields && len(jsonFields) > 0 {
			// Pick any field to be the scapegoat.
			var f string
			for fname := range jsonFields {
				f = fname
				break
			}
			return fmt.Errorf("unknown field %q in %v", f, targetType)
		}
		return nil
	}
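
	// The consumeField closure above accepts both spellings of a field name
	// and prefers the camelCase one when both are present. The same
	// precedence shown on a plain map (illustrative only):
	//
	//	jsonFields := map[string]json.RawMessage{
	//		"display_name": json.RawMessage(`"a"`),
	//		"displayName":  json.RawMessage(`"b"`),
	//	}
	//	raw := jsonFields["display_name"]
	//	if v, ok := jsonFields["displayName"]; ok {
	//		raw = v // camelCase wins: raw is `"b"`
	//	}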

	// Handle arrays
	if targetType.Kind() == reflect.Slice {
		if targetType.Elem().Kind() == reflect.Uint8 {
			outRef := reflect.New(targetType)
			outVal := outRef.Interface()
			//CustomType with underlying type []byte
			if _, ok := outVal.(interface {
				UnmarshalJSON([]byte) error
			}); ok {
				if err := json.Unmarshal(inputValue, outVal); err != nil {
					return err
				}
				target.Set(outRef.Elem())
				return nil
			}
			// Special case for encoded bytes. Pre-go1.5 doesn't support unmarshalling
			// strings into aliased []byte types.
			// https://github.com/golang/go/commit/4302fd0409da5e4f1d71471a6770dacdc3301197
			// https://github.com/golang/go/commit/c60707b14d6be26bf4213114d13070bff00d0b0a
			var out []byte
			if err := json.Unmarshal(inputValue, &out); err != nil {
				return err
			}
			target.SetBytes(out)
			return nil
		}

		var slc []json.RawMessage
		if err := json.Unmarshal(inputValue, &slc); err != nil {
			return err
		}
		len := len(slc)
		target.Set(reflect.MakeSlice(targetType, len, len))
		for i := 0; i < len; i++ {
			if err := u.unmarshalValue(target.Index(i), slc[i], prop); err != nil {
				return err
			}
		}
		return nil
	}
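
	// For a plain bytes field the fallback above relies on encoding/json's
	// base64 handling (illustrative only):
	//
	//	var out []byte
	//	_ = json.Unmarshal([]byte(`"aGVsbG8="`), &out)
	//	// out == []byte("hello")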

	// Handle maps (whose keys are always strings)
	if targetType.Kind() == reflect.Map {
		var mp map[string]json.RawMessage
		if err := json.Unmarshal(inputValue, &mp); err != nil {
			return err
		}
		target.Set(reflect.MakeMap(targetType))
		var keyprop, valprop *proto.Properties
		if prop != nil {
			// These could still be nil if the protobuf metadata is broken somehow.
			// TODO: This won't work because the fields are unexported.
			// We should probably just reparse them.
			//keyprop, valprop = prop.mkeyprop, prop.mvalprop
		}
		for ks, raw := range mp {
			// Unmarshal map key. The core json library already decoded the key into a
			// string, so we handle that specially. Other types were quoted post-serialization.
			var k reflect.Value
			if targetType.Key().Kind() == reflect.String {
				k = reflect.ValueOf(ks)
			} else {
				k = reflect.New(targetType.Key()).Elem()
				if err := u.unmarshalValue(k, json.RawMessage(ks), keyprop); err != nil {
					return err
				}
			}

			if !k.Type().AssignableTo(targetType.Key()) {
				k = k.Convert(targetType.Key())
			}

			// Unmarshal map value.
			v := reflect.New(targetType.Elem()).Elem()
			if err := u.unmarshalValue(v, raw, valprop); err != nil {
				return err
			}
			target.SetMapIndex(k, v)
		}
		return nil
	}

	// 64-bit integers can be encoded as strings. In this case we drop
	// the quotes and proceed as normal.
	isNum := targetType.Kind() == reflect.Int64 || targetType.Kind() == reflect.Uint64
	if isNum && strings.HasPrefix(string(inputValue), `"`) {
		inputValue = inputValue[1 : len(inputValue)-1]
	}

	// Use the encoding/json for parsing other value types.
	return json.Unmarshal(inputValue, target.Addr().Interface())
}

// jsonProperties returns parsed proto.Properties for the field and corrects JSONName attribute.
func jsonProperties(f reflect.StructField, origName bool) *proto.Properties {
	var prop proto.Properties
	prop.Init(f.Type, f.Name, f.Tag.Get("protobuf"), &f)
	if origName || prop.JSONName == "" {
		prop.JSONName = prop.OrigName
	}
	return &prop
}

type fieldNames struct {
	orig, camel string
}

func acceptedJSONFieldNames(prop *proto.Properties) fieldNames {
	opts := fieldNames{orig: prop.OrigName, camel: prop.OrigName}
	if prop.JSONName != "" {
		opts.camel = prop.JSONName
	}
	return opts
}

// Writer wrapper inspired by https://blog.golang.org/errors-are-values
type errWriter struct {
	writer io.Writer
	err    error
}

func (w *errWriter) write(str string) {
	if w.err != nil {
		return
	}
	_, w.err = w.writer.Write([]byte(str))
}
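
// errWriter follows the "errors are values" pattern: call write repeatedly
// and check the error once at the end. A sketch of typical use
// (illustrative only):
//
//	var buf bytes.Buffer
//	out := &errWriter{writer: &buf}
//	out.write(`{`)
//	out.write(`"a":1`)
//	out.write(`}`)
//	if out.err != nil {
//		// the first write error is kept; later writes were no-ops
//	}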

// Map fields may have key types of non-float scalars, strings and enums.
// The easiest way to sort them in some deterministic order is to use fmt.
// If this turns out to be inefficient we can always consider other options,
// such as doing a Schwartzian transform.
//
// Numeric keys are sorted in numeric order per
// https://developers.google.com/protocol-buffers/docs/proto#maps.
type mapKeys []reflect.Value

func (s mapKeys) Len() int      { return len(s) }
func (s mapKeys) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s mapKeys) Less(i, j int) bool {
	if k := s[i].Kind(); k == s[j].Kind() {
		switch k {
		case reflect.Int32, reflect.Int64:
			return s[i].Int() < s[j].Int()
		case reflect.Uint32, reflect.Uint64:
			return s[i].Uint() < s[j].Uint()
		}
	}
	return fmt.Sprint(s[i].Interface()) < fmt.Sprint(s[j].Interface())
}
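
// mapKeys gives numeric keys numeric order (everything else falls back to
// fmt ordering), which is what makes the map output above deterministic.
// Sketch (illustrative only):
//
//	keys := []reflect.Value{
//		reflect.ValueOf(int32(10)),
//		reflect.ValueOf(int32(2)),
//	}
//	sort.Sort(mapKeys(keys))
//	// keys are now [2 10], not the lexicographic ["10" "2"].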
39 vendor/github.com/gogo/protobuf/types/Makefile generated vendored Normal file
@@ -0,0 +1,39 @@
# Protocol Buffers for Go with Gadgets
#
# Copyright (c) 2016, The GoGo Authors. All rights reserved.
# http://github.com/gogo/protobuf
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

regenerate:
	go install github.com/gogo/protobuf/protoc-gen-gogotypes
	go install github.com/gogo/protobuf/protoc-min-version

	protoc-min-version --version="3.0.0" --gogotypes_out=. -I=../protobuf/google/protobuf ../protobuf/google/protobuf/any.proto
	protoc-min-version --version="3.0.0" --gogotypes_out=. -I=../protobuf/google/protobuf ../protobuf/google/protobuf/empty.proto
	protoc-min-version --version="3.0.0" --gogotypes_out=. -I=../protobuf/google/protobuf ../protobuf/google/protobuf/timestamp.proto
	protoc-min-version --version="3.0.0" --gogotypes_out=. -I=../protobuf/google/protobuf ../protobuf/google/protobuf/duration.proto
	protoc-min-version --version="3.0.0" --gogotypes_out=. -I=../protobuf/google/protobuf ../protobuf/google/protobuf/struct.proto
	protoc-min-version --version="3.0.0" --gogotypes_out=. -I=../protobuf/google/protobuf ../protobuf/google/protobuf/wrappers.proto
	protoc-min-version --version="3.0.0" --gogotypes_out=. -I=../protobuf/google/protobuf ../protobuf/google/protobuf/field_mask.proto
135 vendor/github.com/gogo/protobuf/types/any.go generated vendored Normal file
@@ -0,0 +1,135 @@
|
||||||
|
// Go support for Protocol Buffers - Google's data interchange format
|
||||||
|
//
|
||||||
|
// Copyright 2016 The Go Authors. All rights reserved.
|
||||||
|
// https://github.com/golang/protobuf
|
||||||
|
//
|
||||||
|
// Redistribution and use in source and binary forms, with or without
|
||||||
|
// modification, are permitted provided that the following conditions are
|
||||||
|
// met:
|
||||||
|
//
|
||||||
|
// * Redistributions of source code must retain the above copyright
|
||||||
|
// notice, this list of conditions and the following disclaimer.
|
||||||
|
// * Redistributions in binary form must reproduce the above
|
||||||
|
// copyright notice, this list of conditions and the following disclaimer
|
||||||
|
// in the documentation and/or other materials provided with the
|
||||||
|
// distribution.
|
||||||
|
// * Neither the name of Google Inc. nor the names of its
|
||||||
|
// contributors may be used to endorse or promote products derived from
|
||||||
|
// this software without specific prior written permission.
|
||||||
|
//
|
||||||
|
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
|
||||||
|
package types
|
||||||
|
|
||||||
|
// This file implements functions to marshal proto.Message to/from
|
||||||
|
// google.protobuf.Any message.
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"reflect"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/gogo/protobuf/proto"
|
||||||
|
)
|
||||||
|
|
||||||
|
const googleApis = "type.googleapis.com/"
|
||||||
|
|
||||||
|
// AnyMessageName returns the name of the message contained in a google.protobuf.Any message.
|
||||||
|
//
|
||||||
|
// Note that regular type assertions should be done using the Is
|
||||||
|
// function. AnyMessageName is provided for less common use cases like filtering a
|
||||||
|
// sequence of Any messages based on a set of allowed message type names.
|
||||||
|
func AnyMessageName(any *Any) (string, error) {
|
||||||
|
slash := strings.LastIndex(any.TypeUrl, "/")
|
||||||
|
if slash < 0 {
|
||||||
|
return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl)
|
||||||
|
}
|
||||||
|
return any.TypeUrl[slash+1:], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// MarshalAny takes the protocol buffer and encodes it into google.protobuf.Any.
|
||||||
|
func MarshalAny(pb proto.Message) (*Any, error) {
|
||||||
|
value, err := proto.Marshal(pb)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return &Any{TypeUrl: googleApis + proto.MessageName(pb), Value: value}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// DynamicAny is a value that can be passed to UnmarshalAny to automatically
|
||||||
|
// allocate a proto.Message for the type specified in a google.protobuf.Any
|
||||||
|
// message. The allocated message is stored in the embedded proto.Message.
|
||||||
|
//
|
||||||
|
// Example:
|
||||||
|
//
|
||||||
|
// var x ptypes.DynamicAny
|
||||||
|
// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... }
|
||||||
|
// fmt.Printf("unmarshaled message: %v", x.Message)
|
||||||
|
type DynamicAny struct {
|
||||||
|
proto.Message
|
||||||
|
}
|
||||||
|
|
||||||
|
// Empty returns a new proto.Message of the type specified in a
|
||||||
|
// google.protobuf.Any message. It returns an error if corresponding message
|
||||||
|
// type isn't linked in.
|
||||||
|
func EmptyAny(any *Any) (proto.Message, error) {
|
||||||
|
aname, err := AnyMessageName(any)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
t := proto.MessageType(aname)
|
||||||
|
if t == nil {
|
||||||
|
return nil, fmt.Errorf("any: message type %q isn't linked in", aname)
|
||||||
|
}
|
||||||
|
return reflect.New(t.Elem()).Interface().(proto.Message), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalAny parses the protocol buffer representation in a google.protobuf.Any
|
||||||
|
// message and places the decoded result in pb. It returns an error if type of
|
||||||
|
// contents of Any message does not match type of pb message.
|
||||||
|
//
|
||||||
|
// pb can be a proto.Message, or a *DynamicAny.
|
||||||
|
func UnmarshalAny(any *Any, pb proto.Message) error {
|
||||||
|
if d, ok := pb.(*DynamicAny); ok {
|
||||||
|
if d.Message == nil {
|
||||||
|
var err error
|
||||||
|
d.Message, err = EmptyAny(any)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return UnmarshalAny(any, d.Message)
|
||||||
|
}
|
||||||
|
|
||||||
|
aname, err := AnyMessageName(any)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
mname := proto.MessageName(pb)
|
||||||
|
if aname != mname {
|
||||||
|
return fmt.Errorf("mismatched message type: got %q want %q", aname, mname)
|
||||||
|
}
|
||||||
|
return proto.Unmarshal(any.Value, pb)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Is returns true if any value contains a given message type.
|
||||||
|
func Is(any *Any, pb proto.Message) bool {
|
||||||
|
aname, err := AnyMessageName(any)
|
||||||
|
if err != nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
return aname == proto.MessageName(pb)
|
||||||
|
}
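
// Typical round trip through Any, sketched with a hypothetical generated
// message type pb.Foo (the concrete type is an assumption, not part of
// this package):
//
//	a, err := MarshalAny(orig) // orig is a *pb.Foo
//	if err != nil {
//		return err
//	}
//	back := &pb.Foo{}
//	if Is(a, back) {
//		err = UnmarshalAny(a, back) // back now mirrors orig
//	}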
|
666 vendor/github.com/gogo/protobuf/types/any.pb.go generated vendored Normal file
@@ -0,0 +1,666 @@
|
||||||
|
// Code generated by protoc-gen-gogo.
|
||||||
|
// source: any.proto
|
||||||
|
// DO NOT EDIT!
|
||||||
|
|
||||||
|
/*
|
||||||
|
Package types is a generated protocol buffer package.
|
||||||
|
|
||||||
|
It is generated from these files:
|
||||||
|
any.proto
|
||||||
|
|
||||||
|
It has these top-level messages:
|
||||||
|
Any
|
||||||
|
*/
|
||||||
|
package types
|
||||||
|
|
||||||
|
import proto "github.com/gogo/protobuf/proto"
|
||||||
|
import fmt "fmt"
|
||||||
|
import math "math"
|
||||||
|
|
||||||
|
import bytes "bytes"
|
||||||
|
|
||||||
|
import strings "strings"
|
||||||
|
import reflect "reflect"
|
||||||
|
|
||||||
|
import io "io"
|
||||||
|
|
||||||
|
// Reference imports to suppress errors if they are not otherwise used.
|
||||||
|
var _ = proto.Marshal
|
||||||
|
var _ = fmt.Errorf
|
||||||
|
var _ = math.Inf
|
||||||
|
|
||||||
|
// This is a compile-time assertion to ensure that this generated file
|
||||||
|
// is compatible with the proto package it is being compiled against.
|
||||||
|
// A compilation error at this line likely means your copy of the
|
||||||
|
// proto package needs to be updated.
|
||||||
|
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
|
||||||
|
|
||||||
|
// `Any` contains an arbitrary serialized protocol buffer message along with a
|
||||||
|
// URL that describes the type of the serialized message.
|
||||||
|
//
|
||||||
|
// Protobuf library provides support to pack/unpack Any values in the form
|
||||||
|
// of utility functions or additional generated methods of the Any type.
|
||||||
|
//
|
||||||
|
// Example 1: Pack and unpack a message in C++.
|
||||||
|
//
|
||||||
|
// Foo foo = ...;
|
||||||
|
// Any any;
|
||||||
|
// any.PackFrom(foo);
|
||||||
|
// ...
|
||||||
|
// if (any.UnpackTo(&foo)) {
|
||||||
|
// ...
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// Example 2: Pack and unpack a message in Java.
|
||||||
|
//
|
||||||
|
// Foo foo = ...;
|
||||||
|
// Any any = Any.pack(foo);
|
||||||
|
// ...
|
||||||
|
// if (any.is(Foo.class)) {
|
||||||
|
// foo = any.unpack(Foo.class);
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// Example 3: Pack and unpack a message in Python.
|
||||||
|
//
|
||||||
|
// foo = Foo(...)
|
||||||
|
// any = Any()
|
||||||
|
// any.Pack(foo)
|
||||||
|
// ...
|
||||||
|
// if any.Is(Foo.DESCRIPTOR):
|
||||||
|
// any.Unpack(foo)
|
||||||
|
// ...
|
||||||
|
//
|
||||||
|
// The pack methods provided by protobuf library will by default use
|
||||||
|
// 'type.googleapis.com/full.type.name' as the type URL and the unpack
|
||||||
|
// methods only use the fully qualified type name after the last '/'
|
||||||
|
// in the type URL, for example "foo.bar.com/x/y.z" will yield type
|
||||||
|
// name "y.z".
|
||||||
|
//
|
||||||
|
//
|
||||||
|
// JSON
|
||||||
|
// ====
|
||||||
|
// The JSON representation of an `Any` value uses the regular
|
||||||
|
// representation of the deserialized, embedded message, with an
|
||||||
|
// additional field `@type` which contains the type URL. Example:
|
||||||
|
//
|
||||||
|
// package google.profile;
|
||||||
|
// message Person {
|
||||||
|
// string first_name = 1;
|
||||||
|
// string last_name = 2;
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// {
|
||||||
|
// "@type": "type.googleapis.com/google.profile.Person",
|
||||||
|
// "firstName": <string>,
|
||||||
|
// "lastName": <string>
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// If the embedded message type is well-known and has a custom JSON
|
||||||
|
// representation, that representation will be embedded adding a field
|
||||||
|
// `value` which holds the custom JSON in addition to the `@type`
|
||||||
|
// field. Example (for message [google.protobuf.Duration][]):
|
||||||
|
//
|
||||||
|
// {
|
||||||
|
// "@type": "type.googleapis.com/google.protobuf.Duration",
|
||||||
|
// "value": "1.212s"
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
type Any struct {
|
||||||
|
// A URL/resource name whose content describes the type of the
|
||||||
|
// serialized protocol buffer message.
|
||||||
|
//
|
||||||
|
// For URLs which use the scheme `http`, `https`, or no scheme, the
|
||||||
|
// following restrictions and interpretations apply:
|
||||||
|
//
|
||||||
|
// * If no scheme is provided, `https` is assumed.
|
||||||
|
// * The last segment of the URL's path must represent the fully
|
||||||
|
// qualified name of the type (as in `path/google.protobuf.Duration`).
|
||||||
|
// The name should be in a canonical form (e.g., leading "." is
|
||||||
|
// not accepted).
|
||||||
|
// * An HTTP GET on the URL must yield a [google.protobuf.Type][]
|
||||||
|
// value in binary format, or produce an error.
|
||||||
|
// * Applications are allowed to cache lookup results based on the
|
||||||
|
// URL, or have them precompiled into a binary to avoid any
|
||||||
|
// lookup. Therefore, binary compatibility needs to be preserved
|
||||||
|
// on changes to types. (Use versioned type names to manage
|
||||||
|
// breaking changes.)
|
||||||
|
//
|
||||||
|
// Schemes other than `http`, `https` (or the empty scheme) might be
|
||||||
|
// used with implementation specific semantics.
|
||||||
|
//
|
||||||
|
TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"`
|
||||||
|
// Must be a valid serialized protocol buffer of the above specified type.
|
||||||
|
Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Any) Reset() { *m = Any{} }
|
||||||
|
func (*Any) ProtoMessage() {}
|
||||||
|
func (*Any) Descriptor() ([]byte, []int) { return fileDescriptorAny, []int{0} }
|
||||||
|
func (*Any) XXX_WellKnownType() string { return "Any" }
|
||||||
|
|
||||||
|
func (m *Any) GetTypeUrl() string {
|
||||||
|
if m != nil {
|
||||||
|
return m.TypeUrl
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Any) GetValue() []byte {
|
||||||
|
if m != nil {
|
||||||
|
return m.Value
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
proto.RegisterType((*Any)(nil), "google.protobuf.Any")
|
||||||
|
}
|
||||||
|
func (this *Any) Compare(that interface{}) int {
|
||||||
|
if that == nil {
|
||||||
|
if this == nil {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
return 1
|
||||||
|
}
|
||||||
|
|
||||||
|
that1, ok := that.(*Any)
|
||||||
|
if !ok {
|
||||||
|
that2, ok := that.(Any)
|
||||||
|
if ok {
|
||||||
|
that1 = &that2
|
||||||
|
} else {
|
||||||
|
return 1
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if that1 == nil {
|
||||||
|
if this == nil {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
return 1
|
||||||
|
} else if this == nil {
|
||||||
|
return -1
|
||||||
|
}
|
||||||
|
if this.TypeUrl != that1.TypeUrl {
|
||||||
|
if this.TypeUrl < that1.TypeUrl {
|
||||||
|
return -1
|
||||||
|
}
|
||||||
|
return 1
|
||||||
|
}
|
||||||
|
if c := bytes.Compare(this.Value, that1.Value); c != 0 {
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
func (this *Any) Equal(that interface{}) bool {
|
||||||
|
if that == nil {
|
||||||
|
if this == nil {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
that1, ok := that.(*Any)
|
||||||
|
if !ok {
|
||||||
|
that2, ok := that.(Any)
|
||||||
|
if ok {
|
||||||
|
that1 = &that2
|
||||||
|
} else {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if that1 == nil {
|
||||||
|
if this == nil {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
} else if this == nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if this.TypeUrl != that1.TypeUrl {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if !bytes.Equal(this.Value, that1.Value) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
func (this *Any) GoString() string {
|
||||||
|
if this == nil {
|
||||||
|
return "nil"
|
||||||
|
}
|
||||||
|
s := make([]string, 0, 6)
|
||||||
|
s = append(s, "&types.Any{")
|
||||||
|
s = append(s, "TypeUrl: "+fmt.Sprintf("%#v", this.TypeUrl)+",\n")
|
||||||
|
s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n")
|
||||||
|
s = append(s, "}")
|
||||||
|
return strings.Join(s, "")
|
||||||
|
}
|
||||||
|
func valueToGoStringAny(v interface{}, typ string) string {
|
||||||
|
rv := reflect.ValueOf(v)
|
||||||
|
if rv.IsNil() {
|
||||||
|
return "nil"
|
||||||
|
}
|
||||||
|
pv := reflect.Indirect(rv).Interface()
|
||||||
|
return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
|
||||||
|
}
|
||||||
|
func (m *Any) Marshal() (dAtA []byte, err error) {
|
||||||
|
size := m.Size()
|
||||||
|
dAtA = make([]byte, size)
|
||||||
|
n, err := m.MarshalTo(dAtA)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return dAtA[:n], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Any) MarshalTo(dAtA []byte) (int, error) {
|
||||||
|
var i int
|
||||||
|
_ = i
|
||||||
|
var l int
|
||||||
|
_ = l
|
||||||
|
if len(m.TypeUrl) > 0 {
|
||||||
|
dAtA[i] = 0xa
|
||||||
|
i++
|
||||||
|
i = encodeVarintAny(dAtA, i, uint64(len(m.TypeUrl)))
|
||||||
|
i += copy(dAtA[i:], m.TypeUrl)
|
||||||
|
}
|
||||||
|
if len(m.Value) > 0 {
|
||||||
|
dAtA[i] = 0x12
|
||||||
|
i++
|
||||||
|
i = encodeVarintAny(dAtA, i, uint64(len(m.Value)))
|
||||||
|
i += copy(dAtA[i:], m.Value)
|
||||||
|
}
|
||||||
|
return i, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func encodeFixed64Any(dAtA []byte, offset int, v uint64) int {
|
||||||
|
dAtA[offset] = uint8(v)
|
||||||
|
dAtA[offset+1] = uint8(v >> 8)
|
||||||
|
dAtA[offset+2] = uint8(v >> 16)
|
||||||
|
dAtA[offset+3] = uint8(v >> 24)
|
||||||
|
dAtA[offset+4] = uint8(v >> 32)
|
||||||
|
dAtA[offset+5] = uint8(v >> 40)
|
||||||
|
dAtA[offset+6] = uint8(v >> 48)
|
||||||
|
dAtA[offset+7] = uint8(v >> 56)
|
||||||
|
return offset + 8
|
||||||
|
}
|
||||||
|
func encodeFixed32Any(dAtA []byte, offset int, v uint32) int {
|
||||||
|
dAtA[offset] = uint8(v)
|
||||||
|
dAtA[offset+1] = uint8(v >> 8)
|
||||||
|
dAtA[offset+2] = uint8(v >> 16)
|
||||||
|
dAtA[offset+3] = uint8(v >> 24)
|
||||||
|
return offset + 4
|
||||||
|
}
|
||||||
|
func encodeVarintAny(dAtA []byte, offset int, v uint64) int {
|
||||||
|
for v >= 1<<7 {
|
||||||
|
dAtA[offset] = uint8(v&0x7f | 0x80)
|
||||||
|
v >>= 7
|
||||||
|
offset++
|
||||||
|
}
|
||||||
|
dAtA[offset] = uint8(v)
|
||||||
|
return offset + 1
|
||||||
|
}
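
// encodeVarintAny writes the standard protobuf base-128 varint: seven bits
// per byte, least-significant group first, with the high bit set on every
// byte except the last. For example (illustrative only):
//
//	buf := make([]byte, 2)
//	n := encodeVarintAny(buf, 0, 300)
//	// buf[:n] == []byte{0xac, 0x02}  (300 == 0b10_0101100)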
|
||||||
|
func NewPopulatedAny(r randyAny, easy bool) *Any {
|
||||||
|
this := &Any{}
|
||||||
|
this.TypeUrl = string(randStringAny(r))
|
||||||
|
v1 := r.Intn(100)
|
||||||
|
this.Value = make([]byte, v1)
|
||||||
|
for i := 0; i < v1; i++ {
|
||||||
|
this.Value[i] = byte(r.Intn(256))
|
||||||
|
}
|
||||||
|
if !easy && r.Intn(10) != 0 {
|
||||||
|
}
|
||||||
|
return this
|
||||||
|
}
|
||||||
|
|
||||||
|
type randyAny interface {
|
||||||
|
Float32() float32
|
||||||
|
Float64() float64
|
||||||
|
Int63() int64
|
||||||
|
Int31() int32
|
||||||
|
Uint32() uint32
|
||||||
|
Intn(n int) int
|
||||||
|
}
|
||||||
|
|
||||||
|
func randUTF8RuneAny(r randyAny) rune {
|
||||||
|
ru := r.Intn(62)
|
||||||
|
if ru < 10 {
|
||||||
|
return rune(ru + 48)
|
||||||
|
} else if ru < 36 {
|
||||||
|
return rune(ru + 55)
|
||||||
|
}
|
||||||
|
return rune(ru + 61)
|
||||||
|
}
|
||||||
|
func randStringAny(r randyAny) string {
|
||||||
|
v2 := r.Intn(100)
|
||||||
|
tmps := make([]rune, v2)
|
||||||
|
for i := 0; i < v2; i++ {
|
||||||
|
tmps[i] = randUTF8RuneAny(r)
|
||||||
|
}
|
||||||
|
return string(tmps)
|
||||||
|
}
|
||||||
|
func randUnrecognizedAny(r randyAny, maxFieldNumber int) (dAtA []byte) {
|
||||||
|
l := r.Intn(5)
|
||||||
|
for i := 0; i < l; i++ {
|
||||||
|
wire := r.Intn(4)
|
||||||
|
if wire == 3 {
|
||||||
|
wire = 5
|
||||||
|
}
|
||||||
|
fieldNumber := maxFieldNumber + r.Intn(100)
|
||||||
|
dAtA = randFieldAny(dAtA, r, fieldNumber, wire)
|
||||||
|
}
|
||||||
|
return dAtA
|
||||||
|
}
|
||||||
|
func randFieldAny(dAtA []byte, r randyAny, fieldNumber int, wire int) []byte {
|
||||||
|
key := uint32(fieldNumber)<<3 | uint32(wire)
|
||||||
|
switch wire {
|
||||||
|
case 0:
|
||||||
|
dAtA = encodeVarintPopulateAny(dAtA, uint64(key))
|
||||||
|
v3 := r.Int63()
|
||||||
|
if r.Intn(2) == 0 {
|
||||||
|
v3 *= -1
|
||||||
|
}
|
||||||
|
dAtA = encodeVarintPopulateAny(dAtA, uint64(v3))
|
||||||
|
case 1:
|
||||||
|
dAtA = encodeVarintPopulateAny(dAtA, uint64(key))
|
||||||
|
dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)))
|
||||||
|
case 2:
|
||||||
|
dAtA = encodeVarintPopulateAny(dAtA, uint64(key))
|
||||||
|
ll := r.Intn(100)
|
||||||
|
dAtA = encodeVarintPopulateAny(dAtA, uint64(ll))
|
||||||
|
for j := 0; j < ll; j++ {
|
||||||
|
dAtA = append(dAtA, byte(r.Intn(256)))
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
dAtA = encodeVarintPopulateAny(dAtA, uint64(key))
|
||||||
|
dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)))
|
||||||
|
}
|
||||||
|
return dAtA
|
||||||
|
}
|
||||||
|
func encodeVarintPopulateAny(dAtA []byte, v uint64) []byte {
|
||||||
|
for v >= 1<<7 {
|
||||||
|
dAtA = append(dAtA, uint8(uint64(v)&0x7f|0x80))
|
||||||
|
v >>= 7
|
||||||
|
}
|
||||||
|
dAtA = append(dAtA, uint8(v))
|
||||||
|
return dAtA
|
||||||
|
}
|
||||||
|
func (m *Any) Size() (n int) {
|
||||||
|
var l int
|
||||||
|
_ = l
|
||||||
|
l = len(m.TypeUrl)
|
||||||
|
if l > 0 {
|
||||||
|
n += 1 + l + sovAny(uint64(l))
|
||||||
|
}
|
||||||
|
l = len(m.Value)
|
||||||
|
if l > 0 {
|
||||||
|
n += 1 + l + sovAny(uint64(l))
|
||||||
|
}
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
|
||||||
|
func sovAny(x uint64) (n int) {
|
||||||
|
for {
|
||||||
|
n++
|
||||||
|
x >>= 7
|
||||||
|
if x == 0 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
func sozAny(x uint64) (n int) {
|
||||||
|
return sovAny(uint64((x << 1) ^ uint64((int64(x) >> 63))))
|
||||||
|
}
|
||||||
|
func (this *Any) String() string {
|
||||||
|
if this == nil {
|
||||||
|
return "nil"
|
||||||
|
}
|
||||||
|
s := strings.Join([]string{`&Any{`,
|
||||||
|
`TypeUrl:` + fmt.Sprintf("%v", this.TypeUrl) + `,`,
|
||||||
|
`Value:` + fmt.Sprintf("%v", this.Value) + `,`,
|
||||||
|
`}`,
|
||||||
|
}, "")
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
func valueToStringAny(v interface{}) string {
|
||||||
|
rv := reflect.ValueOf(v)
|
||||||
|
if rv.IsNil() {
|
||||||
|
return "nil"
|
||||||
|
}
|
||||||
|
pv := reflect.Indirect(rv).Interface()
|
||||||
|
return fmt.Sprintf("*%v", pv)
|
||||||
|
}
|
||||||
|
func (m *Any) Unmarshal(dAtA []byte) error {
|
||||||
|
l := len(dAtA)
|
||||||
|
iNdEx := 0
|
||||||
|
for iNdEx < l {
|
||||||
|
preIndex := iNdEx
|
||||||
|
var wire uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowAny
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
wire |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
fieldNum := int32(wire >> 3)
|
||||||
|
wireType := int(wire & 0x7)
|
||||||
|
if wireType == 4 {
|
||||||
|
return fmt.Errorf("proto: Any: wiretype end group for non-group")
|
||||||
|
}
|
||||||
|
if fieldNum <= 0 {
|
||||||
|
return fmt.Errorf("proto: Any: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||||
|
}
|
||||||
|
switch fieldNum {
|
||||||
|
case 1:
|
||||||
|
if wireType != 2 {
|
||||||
|
return fmt.Errorf("proto: wrong wireType = %d for field TypeUrl", wireType)
|
||||||
|
}
|
||||||
|
var stringLen uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowAny
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
stringLen |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
intStringLen := int(stringLen)
|
||||||
|
if intStringLen < 0 {
|
||||||
|
return ErrInvalidLengthAny
|
||||||
|
}
|
||||||
|
postIndex := iNdEx + intStringLen
|
||||||
|
if postIndex > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
m.TypeUrl = string(dAtA[iNdEx:postIndex])
|
||||||
|
iNdEx = postIndex
|
||||||
|
case 2:
|
||||||
|
if wireType != 2 {
|
||||||
|
return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
|
||||||
|
}
|
||||||
|
var byteLen int
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowAny
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
byteLen |= (int(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if byteLen < 0 {
|
||||||
|
return ErrInvalidLengthAny
|
||||||
|
}
|
||||||
|
postIndex := iNdEx + byteLen
|
||||||
|
if postIndex > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...)
|
||||||
|
if m.Value == nil {
|
||||||
|
m.Value = []byte{}
|
||||||
|
}
|
||||||
|
iNdEx = postIndex
|
||||||
|
default:
|
||||||
|
iNdEx = preIndex
|
||||||
|
skippy, err := skipAny(dAtA[iNdEx:])
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if skippy < 0 {
|
||||||
|
return ErrInvalidLengthAny
|
||||||
|
}
|
||||||
|
if (iNdEx + skippy) > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
iNdEx += skippy
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if iNdEx > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
func skipAny(dAtA []byte) (n int, err error) {
|
||||||
|
l := len(dAtA)
|
||||||
|
iNdEx := 0
|
||||||
|
for iNdEx < l {
|
||||||
|
var wire uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return 0, ErrIntOverflowAny
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
wire |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
wireType := int(wire & 0x7)
|
||||||
|
switch wireType {
|
||||||
|
case 0:
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return 0, ErrIntOverflowAny
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
iNdEx++
|
||||||
|
if dAtA[iNdEx-1] < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return iNdEx, nil
|
||||||
|
case 1:
|
||||||
|
iNdEx += 8
|
||||||
|
return iNdEx, nil
|
||||||
|
case 2:
|
||||||
|
var length int
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return 0, ErrIntOverflowAny
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
length |= (int(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
iNdEx += length
|
||||||
|
if length < 0 {
|
||||||
|
return 0, ErrInvalidLengthAny
|
||||||
|
}
|
||||||
|
return iNdEx, nil
|
||||||
|
case 3:
|
||||||
|
for {
|
||||||
|
var innerWire uint64
|
||||||
|
var start int = iNdEx
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return 0, ErrIntOverflowAny
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
innerWire |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
innerWireType := int(innerWire & 0x7)
|
||||||
|
if innerWireType == 4 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
next, err := skipAny(dAtA[start:])
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
iNdEx = start + next
|
||||||
|
}
|
||||||
|
return iNdEx, nil
|
||||||
|
case 4:
|
||||||
|
return iNdEx, nil
|
||||||
|
case 5:
|
||||||
|
iNdEx += 4
|
||||||
|
return iNdEx, nil
|
||||||
|
default:
|
||||||
|
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
panic("unreachable")
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
ErrInvalidLengthAny = fmt.Errorf("proto: negative length found during unmarshaling")
|
||||||
|
ErrIntOverflowAny = fmt.Errorf("proto: integer overflow")
|
||||||
|
)
|
||||||
|
|
||||||
|
func init() { proto.RegisterFile("any.proto", fileDescriptorAny) }
|
||||||
|
|
||||||
|
var fileDescriptorAny = []byte{
|
||||||
|
// 204 bytes of a gzipped FileDescriptorProto
|
||||||
|
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x4c, 0xcc, 0xab, 0xd4,
|
||||||
|
0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x4f, 0xcf, 0xcf, 0x4f, 0xcf, 0x49, 0x85, 0xf0, 0x92,
|
||||||
|
0x4a, 0xd3, 0x94, 0xcc, 0xb8, 0x98, 0x1d, 0xf3, 0x2a, 0x85, 0x24, 0xb9, 0x38, 0x4a, 0x2a, 0x0b,
|
||||||
|
0x52, 0xe3, 0x4b, 0x8b, 0x72, 0x24, 0x18, 0x15, 0x18, 0x35, 0x38, 0x83, 0xd8, 0x41, 0xfc, 0xd0,
|
||||||
|
0xa2, 0x1c, 0x21, 0x11, 0x2e, 0xd6, 0xb2, 0xc4, 0x9c, 0xd2, 0x54, 0x09, 0x26, 0x05, 0x46, 0x0d,
|
||||||
|
0x9e, 0x20, 0x08, 0xc7, 0xa9, 0xfe, 0xc2, 0x43, 0x39, 0x86, 0x1b, 0x0f, 0xe5, 0x18, 0x3e, 0x3c,
|
||||||
|
0x94, 0x63, 0xfc, 0xf1, 0x50, 0x8e, 0xb1, 0xe1, 0x91, 0x1c, 0xe3, 0x8a, 0x47, 0x72, 0x8c, 0x27,
|
||||||
|
0x1e, 0xc9, 0x31, 0x5e, 0x78, 0x24, 0xc7, 0xf8, 0xe0, 0x91, 0x1c, 0xe3, 0x8b, 0x47, 0x72, 0x0c,
|
||||||
|
0x1f, 0x40, 0xe2, 0x8f, 0xe5, 0x18, 0xb9, 0x84, 0x93, 0xf3, 0x73, 0xf5, 0xd0, 0xac, 0x77, 0xe2,
|
||||||
|
0x70, 0xcc, 0xab, 0x0c, 0x00, 0x71, 0x02, 0x18, 0xa3, 0x58, 0x41, 0x36, 0x16, 0x2f, 0x62, 0x62,
|
||||||
|
0x76, 0x0f, 0x70, 0x5a, 0xc5, 0x24, 0xe7, 0x0e, 0x51, 0x1a, 0x00, 0x55, 0xaa, 0x17, 0x9e, 0x9a,
|
||||||
|
0x93, 0xe3, 0x9d, 0x97, 0x5f, 0x9e, 0x17, 0x02, 0x52, 0x96, 0xc4, 0x06, 0x36, 0xc3, 0x18, 0x10,
|
||||||
|
0x00, 0x00, 0xff, 0xff, 0xb7, 0x39, 0x2f, 0x89, 0xdd, 0x00, 0x00, 0x00,
|
||||||
|
}
|
35 vendor/github.com/gogo/protobuf/types/doc.go generated vendored Normal file
@@ -0,0 +1,35 @@
|
||||||
|
// Go support for Protocol Buffers - Google's data interchange format
|
||||||
|
//
|
||||||
|
// Copyright 2016 The Go Authors. All rights reserved.
|
||||||
|
// https://github.com/golang/protobuf
|
||||||
|
//
|
||||||
|
// Redistribution and use in source and binary forms, with or without
|
||||||
|
// modification, are permitted provided that the following conditions are
|
||||||
|
// met:
|
||||||
|
//
|
||||||
|
// * Redistributions of source code must retain the above copyright
|
||||||
|
// notice, this list of conditions and the following disclaimer.
|
||||||
|
// * Redistributions in binary form must reproduce the above
|
||||||
|
// copyright notice, this list of conditions and the following disclaimer
|
||||||
|
// in the documentation and/or other materials provided with the
|
||||||
|
// distribution.
|
||||||
|
// * Neither the name of Google Inc. nor the names of its
|
||||||
|
// contributors may be used to endorse or promote products derived from
|
||||||
|
// this software without specific prior written permission.
|
||||||
|
//
|
||||||
|
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
|
||||||
|
/*
|
||||||
|
Package types contains code for interacting with well-known types.
|
||||||
|
*/
|
||||||
|
package types
|
100 vendor/github.com/gogo/protobuf/types/duration.go generated vendored Normal file
@@ -0,0 +1,100 @@
|
||||||
|
// Go support for Protocol Buffers - Google's data interchange format
|
||||||
|
//
|
||||||
|
// Copyright 2016 The Go Authors. All rights reserved.
|
||||||
|
// https://github.com/golang/protobuf
|
||||||
|
//
|
||||||
|
// Redistribution and use in source and binary forms, with or without
|
||||||
|
// modification, are permitted provided that the following conditions are
|
||||||
|
// met:
|
||||||
|
//
|
||||||
|
// * Redistributions of source code must retain the above copyright
|
||||||
|
// notice, this list of conditions and the following disclaimer.
|
||||||
|
// * Redistributions in binary form must reproduce the above
|
||||||
|
// copyright notice, this list of conditions and the following disclaimer
|
||||||
|
// in the documentation and/or other materials provided with the
|
||||||
|
// distribution.
|
||||||
|
// * Neither the name of Google Inc. nor the names of its
|
||||||
|
// contributors may be used to endorse or promote products derived from
|
||||||
|
// this software without specific prior written permission.
|
||||||
|
//
|
||||||
|
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
|
||||||
|
package types
|
||||||
|
|
||||||
|
// This file implements conversions between google.protobuf.Duration
|
||||||
|
// and time.Duration.
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
// Range of a Duration in seconds, as specified in
|
||||||
|
// google/protobuf/duration.proto. This is about 10,000 years in seconds.
|
||||||
|
maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60)
|
||||||
|
minSeconds = -maxSeconds
|
||||||
|
)
|
||||||
|
|
||||||
|
// validateDuration determines whether the Duration is valid according to the
|
||||||
|
// definition in google/protobuf/duration.proto. A valid Duration
|
||||||
|
// may still be too large to fit into a time.Duration (the range of Duration
|
||||||
|
// is about 10,000 years, and the range of time.Duration is about 290).
|
||||||
|
func validateDuration(d *Duration) error {
|
||||||
|
if d == nil {
|
||||||
|
return errors.New("duration: nil Duration")
|
||||||
|
}
|
||||||
|
if d.Seconds < minSeconds || d.Seconds > maxSeconds {
|
||||||
|
return fmt.Errorf("duration: %#v: seconds out of range", d)
|
||||||
|
}
|
||||||
|
if d.Nanos <= -1e9 || d.Nanos >= 1e9 {
|
||||||
|
return fmt.Errorf("duration: %#v: nanos out of range", d)
|
||||||
|
}
|
||||||
|
// Seconds and Nanos must have the same sign, unless d.Nanos is zero.
|
||||||
|
if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) {
|
||||||
|
return fmt.Errorf("duration: %#v: seconds and nanos have different signs", d)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// DurationFromProto converts a Duration to a time.Duration. DurationFromProto
|
||||||
|
// returns an error if the Duration is invalid or is too large to be
|
||||||
|
// represented in a time.Duration.
|
||||||
|
func DurationFromProto(p *Duration) (time.Duration, error) {
|
||||||
|
if err := validateDuration(p); err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
d := time.Duration(p.Seconds) * time.Second
|
||||||
|
if int64(d/time.Second) != p.Seconds {
|
||||||
|
return 0, fmt.Errorf("duration: %#v is out of range for time.Duration", p)
|
||||||
|
}
|
||||||
|
if p.Nanos != 0 {
|
||||||
|
d += time.Duration(p.Nanos)
|
||||||
|
if (d < 0) != (p.Nanos < 0) {
|
||||||
|
return 0, fmt.Errorf("duration: %#v is out of range for time.Duration", p)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return d, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// DurationProto converts a time.Duration to a Duration.
|
||||||
|
func DurationProto(d time.Duration) *Duration {
|
||||||
|
nanos := d.Nanoseconds()
|
||||||
|
secs := nanos / 1e9
|
||||||
|
nanos -= secs * 1e9
|
||||||
|
return &Duration{
|
||||||
|
Seconds: secs,
|
||||||
|
Nanos: int32(nanos),
|
||||||
|
}
|
||||||
|
}
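
// Round trip between time.Duration and the Duration message (illustrative
// values only):
//
//	d := 90 * time.Second
//	p := DurationProto(d) // &Duration{Seconds: 90, Nanos: 0}
//	back, err := DurationFromProto(p)
//	// err == nil and back == 90s; out-of-range or mixed-sign values
//	// instead produce an error from validateDuration.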
|
500 vendor/github.com/gogo/protobuf/types/duration.pb.go generated vendored Normal file
@@ -0,0 +1,500 @@
|
||||||
|
// Code generated by protoc-gen-gogo.
|
||||||
|
// source: duration.proto
|
||||||
|
// DO NOT EDIT!
|
||||||
|
|
||||||
|
/*
|
||||||
|
Package types is a generated protocol buffer package.
|
||||||
|
|
||||||
|
It is generated from these files:
|
||||||
|
duration.proto
|
||||||
|
|
||||||
|
It has these top-level messages:
|
||||||
|
Duration
|
||||||
|
*/
|
||||||
|
package types
|
||||||
|
|
||||||
|
import proto "github.com/gogo/protobuf/proto"
|
||||||
|
import fmt "fmt"
|
||||||
|
import math "math"
|
||||||
|
|
||||||
|
import strings "strings"
|
||||||
|
import reflect "reflect"
|
||||||
|
|
||||||
|
import io "io"
|
||||||
|
|
||||||
|
// Reference imports to suppress errors if they are not otherwise used.
|
||||||
|
var _ = proto.Marshal
|
||||||
|
var _ = fmt.Errorf
|
||||||
|
var _ = math.Inf
|
||||||
|
|
||||||
|
// This is a compile-time assertion to ensure that this generated file
|
||||||
|
// is compatible with the proto package it is being compiled against.
|
||||||
|
// A compilation error at this line likely means your copy of the
|
||||||
|
// proto package needs to be updated.
|
||||||
|
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
|
||||||
|
|
||||||
|
// A Duration represents a signed, fixed-length span of time represented
// as a count of seconds and fractions of seconds at nanosecond
// resolution. It is independent of any calendar and concepts like "day"
// or "month". It is related to Timestamp in that the difference between
// two Timestamp values is a Duration and it can be added or subtracted
// from a Timestamp. Range is approximately +-10,000 years.
//
// Example 1: Compute Duration from two Timestamps in pseudo code.
//
//     Timestamp start = ...;
//     Timestamp end = ...;
//     Duration duration = ...;
//
//     duration.seconds = end.seconds - start.seconds;
//     duration.nanos = end.nanos - start.nanos;
//
//     if (duration.seconds < 0 && duration.nanos > 0) {
//       duration.seconds += 1;
//       duration.nanos -= 1000000000;
//     } else if (duration.seconds > 0 && duration.nanos < 0) {
//       duration.seconds -= 1;
//       duration.nanos += 1000000000;
//     }
//
// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code.
//
//     Timestamp start = ...;
//     Duration duration = ...;
//     Timestamp end = ...;
//
//     end.seconds = start.seconds + duration.seconds;
//     end.nanos = start.nanos + duration.nanos;
//
//     if (end.nanos < 0) {
//       end.seconds -= 1;
//       end.nanos += 1000000000;
//     } else if (end.nanos >= 1000000000) {
//       end.seconds += 1;
//       end.nanos -= 1000000000;
//     }
//
// Example 3: Compute Duration from datetime.timedelta in Python.
//
//     td = datetime.timedelta(days=3, minutes=10)
//     duration = Duration()
//     duration.FromTimedelta(td)
//
//
type Duration struct {
	// Signed seconds of the span of time. Must be from -315,576,000,000
	// to +315,576,000,000 inclusive.
	Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"`
	// Signed fractions of a second at nanosecond resolution of the span
	// of time. Durations less than one second are represented with a 0
	// `seconds` field and a positive or negative `nanos` field. For durations
	// of one second or more, a non-zero value for the `nanos` field must be
	// of the same sign as the `seconds` field. Must be from -999,999,999
	// to +999,999,999 inclusive.
	Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
}
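The sign normalization from Example 1 can be written against the Go struct above. This is an illustrative helper only, not part of the generated package; the name normalizeDuration is made up for the sketch.

// normalizeDuration applies the rule described in Example 1: if Seconds
// and Nanos disagree in sign, shift one whole second between the fields
// so that both end up with the same sign.
func normalizeDuration(d *Duration) {
	if d.Seconds < 0 && d.Nanos > 0 {
		d.Seconds++
		d.Nanos -= 1e9
	} else if d.Seconds > 0 && d.Nanos < 0 {
		d.Seconds--
		d.Nanos += 1e9
	}
}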
|
||||||
|
|
||||||
|
func (m *Duration) Reset() { *m = Duration{} }
|
||||||
|
func (*Duration) ProtoMessage() {}
|
||||||
|
func (*Duration) Descriptor() ([]byte, []int) { return fileDescriptorDuration, []int{0} }
|
||||||
|
func (*Duration) XXX_WellKnownType() string { return "Duration" }
|
||||||
|
|
||||||
|
func (m *Duration) GetSeconds() int64 {
|
||||||
|
if m != nil {
|
||||||
|
return m.Seconds
|
||||||
|
}
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Duration) GetNanos() int32 {
|
||||||
|
if m != nil {
|
||||||
|
return m.Nanos
|
||||||
|
}
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
proto.RegisterType((*Duration)(nil), "google.protobuf.Duration")
|
||||||
|
}
|
||||||
|
func (this *Duration) Compare(that interface{}) int {
|
||||||
|
if that == nil {
|
||||||
|
if this == nil {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
return 1
|
||||||
|
}
|
||||||
|
|
||||||
|
that1, ok := that.(*Duration)
|
||||||
|
if !ok {
|
||||||
|
that2, ok := that.(Duration)
|
||||||
|
if ok {
|
||||||
|
that1 = &that2
|
||||||
|
} else {
|
||||||
|
return 1
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if that1 == nil {
|
||||||
|
if this == nil {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
return 1
|
||||||
|
} else if this == nil {
|
||||||
|
return -1
|
||||||
|
}
|
||||||
|
if this.Seconds != that1.Seconds {
|
||||||
|
if this.Seconds < that1.Seconds {
|
||||||
|
return -1
|
||||||
|
}
|
||||||
|
return 1
|
||||||
|
}
|
||||||
|
if this.Nanos != that1.Nanos {
|
||||||
|
if this.Nanos < that1.Nanos {
|
||||||
|
return -1
|
||||||
|
}
|
||||||
|
return 1
|
||||||
|
}
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
func (this *Duration) Equal(that interface{}) bool {
|
||||||
|
if that == nil {
|
||||||
|
if this == nil {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
that1, ok := that.(*Duration)
|
||||||
|
if !ok {
|
||||||
|
that2, ok := that.(Duration)
|
||||||
|
if ok {
|
||||||
|
that1 = &that2
|
||||||
|
} else {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if that1 == nil {
|
||||||
|
if this == nil {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
} else if this == nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if this.Seconds != that1.Seconds {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if this.Nanos != that1.Nanos {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
func (this *Duration) GoString() string {
|
||||||
|
if this == nil {
|
||||||
|
return "nil"
|
||||||
|
}
|
||||||
|
s := make([]string, 0, 6)
|
||||||
|
s = append(s, "&types.Duration{")
|
||||||
|
s = append(s, "Seconds: "+fmt.Sprintf("%#v", this.Seconds)+",\n")
|
||||||
|
s = append(s, "Nanos: "+fmt.Sprintf("%#v", this.Nanos)+",\n")
|
||||||
|
s = append(s, "}")
|
||||||
|
return strings.Join(s, "")
|
||||||
|
}
|
||||||
|
func valueToGoStringDuration(v interface{}, typ string) string {
|
||||||
|
rv := reflect.ValueOf(v)
|
||||||
|
if rv.IsNil() {
|
||||||
|
return "nil"
|
||||||
|
}
|
||||||
|
pv := reflect.Indirect(rv).Interface()
|
||||||
|
return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
|
||||||
|
}
|
||||||
|
func (m *Duration) Marshal() (dAtA []byte, err error) {
|
||||||
|
size := m.Size()
|
||||||
|
dAtA = make([]byte, size)
|
||||||
|
n, err := m.MarshalTo(dAtA)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return dAtA[:n], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Duration) MarshalTo(dAtA []byte) (int, error) {
|
||||||
|
var i int
|
||||||
|
_ = i
|
||||||
|
var l int
|
||||||
|
_ = l
|
||||||
|
if m.Seconds != 0 {
|
||||||
|
dAtA[i] = 0x8
|
||||||
|
i++
|
||||||
|
i = encodeVarintDuration(dAtA, i, uint64(m.Seconds))
|
||||||
|
}
|
||||||
|
if m.Nanos != 0 {
|
||||||
|
dAtA[i] = 0x10
|
||||||
|
i++
|
||||||
|
i = encodeVarintDuration(dAtA, i, uint64(m.Nanos))
|
||||||
|
}
|
||||||
|
return i, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func encodeFixed64Duration(dAtA []byte, offset int, v uint64) int {
|
||||||
|
dAtA[offset] = uint8(v)
|
||||||
|
dAtA[offset+1] = uint8(v >> 8)
|
||||||
|
dAtA[offset+2] = uint8(v >> 16)
|
||||||
|
dAtA[offset+3] = uint8(v >> 24)
|
||||||
|
dAtA[offset+4] = uint8(v >> 32)
|
||||||
|
dAtA[offset+5] = uint8(v >> 40)
|
||||||
|
dAtA[offset+6] = uint8(v >> 48)
|
||||||
|
dAtA[offset+7] = uint8(v >> 56)
|
||||||
|
return offset + 8
|
||||||
|
}
|
||||||
|
func encodeFixed32Duration(dAtA []byte, offset int, v uint32) int {
|
||||||
|
dAtA[offset] = uint8(v)
|
||||||
|
dAtA[offset+1] = uint8(v >> 8)
|
||||||
|
dAtA[offset+2] = uint8(v >> 16)
|
||||||
|
dAtA[offset+3] = uint8(v >> 24)
|
||||||
|
return offset + 4
|
||||||
|
}
|
||||||
|
func encodeVarintDuration(dAtA []byte, offset int, v uint64) int {
|
||||||
|
for v >= 1<<7 {
|
||||||
|
dAtA[offset] = uint8(v&0x7f | 0x80)
|
||||||
|
v >>= 7
|
||||||
|
offset++
|
||||||
|
}
|
||||||
|
dAtA[offset] = uint8(v)
|
||||||
|
return offset + 1
|
||||||
|
}
|
||||||
|
func (m *Duration) Size() (n int) {
|
||||||
|
var l int
|
||||||
|
_ = l
|
||||||
|
if m.Seconds != 0 {
|
||||||
|
n += 1 + sovDuration(uint64(m.Seconds))
|
||||||
|
}
|
||||||
|
if m.Nanos != 0 {
|
||||||
|
n += 1 + sovDuration(uint64(m.Nanos))
|
||||||
|
}
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
|
||||||
|
func sovDuration(x uint64) (n int) {
|
||||||
|
for {
|
||||||
|
n++
|
||||||
|
x >>= 7
|
||||||
|
if x == 0 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
func sozDuration(x uint64) (n int) {
|
||||||
|
return sovDuration(uint64((x << 1) ^ uint64((int64(x) >> 63))))
|
||||||
|
}
|
||||||
|
func (m *Duration) Unmarshal(dAtA []byte) error {
|
||||||
|
l := len(dAtA)
|
||||||
|
iNdEx := 0
|
||||||
|
for iNdEx < l {
|
||||||
|
preIndex := iNdEx
|
||||||
|
var wire uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowDuration
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
wire |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
fieldNum := int32(wire >> 3)
|
||||||
|
wireType := int(wire & 0x7)
|
||||||
|
if wireType == 4 {
|
||||||
|
return fmt.Errorf("proto: Duration: wiretype end group for non-group")
|
||||||
|
}
|
||||||
|
if fieldNum <= 0 {
|
||||||
|
return fmt.Errorf("proto: Duration: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||||
|
}
|
||||||
|
switch fieldNum {
|
||||||
|
case 1:
|
||||||
|
if wireType != 0 {
|
||||||
|
return fmt.Errorf("proto: wrong wireType = %d for field Seconds", wireType)
|
||||||
|
}
|
||||||
|
m.Seconds = 0
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowDuration
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
m.Seconds |= (int64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
case 2:
|
||||||
|
if wireType != 0 {
|
||||||
|
return fmt.Errorf("proto: wrong wireType = %d for field Nanos", wireType)
|
||||||
|
}
|
||||||
|
m.Nanos = 0
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowDuration
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
m.Nanos |= (int32(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
iNdEx = preIndex
|
||||||
|
skippy, err := skipDuration(dAtA[iNdEx:])
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if skippy < 0 {
|
||||||
|
return ErrInvalidLengthDuration
|
||||||
|
}
|
||||||
|
if (iNdEx + skippy) > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
iNdEx += skippy
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if iNdEx > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
func skipDuration(dAtA []byte) (n int, err error) {
|
||||||
|
l := len(dAtA)
|
||||||
|
iNdEx := 0
|
||||||
|
for iNdEx < l {
|
||||||
|
var wire uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return 0, ErrIntOverflowDuration
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
wire |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
wireType := int(wire & 0x7)
|
||||||
|
switch wireType {
|
||||||
|
case 0:
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return 0, ErrIntOverflowDuration
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
iNdEx++
|
||||||
|
if dAtA[iNdEx-1] < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return iNdEx, nil
|
||||||
|
case 1:
|
||||||
|
iNdEx += 8
|
||||||
|
return iNdEx, nil
|
||||||
|
case 2:
|
||||||
|
var length int
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return 0, ErrIntOverflowDuration
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
length |= (int(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
iNdEx += length
|
||||||
|
if length < 0 {
|
||||||
|
return 0, ErrInvalidLengthDuration
|
||||||
|
}
|
||||||
|
return iNdEx, nil
|
||||||
|
case 3:
|
||||||
|
for {
|
||||||
|
var innerWire uint64
|
||||||
|
var start int = iNdEx
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return 0, ErrIntOverflowDuration
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
innerWire |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
innerWireType := int(innerWire & 0x7)
|
||||||
|
if innerWireType == 4 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
next, err := skipDuration(dAtA[start:])
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
iNdEx = start + next
|
||||||
|
}
|
||||||
|
return iNdEx, nil
|
||||||
|
case 4:
|
||||||
|
return iNdEx, nil
|
||||||
|
case 5:
|
||||||
|
iNdEx += 4
|
||||||
|
return iNdEx, nil
|
||||||
|
default:
|
||||||
|
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
panic("unreachable")
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
ErrInvalidLengthDuration = fmt.Errorf("proto: negative length found during unmarshaling")
|
||||||
|
ErrIntOverflowDuration = fmt.Errorf("proto: integer overflow")
|
||||||
|
)
|
||||||
|
|
||||||
|
func init() { proto.RegisterFile("duration.proto", fileDescriptorDuration) }
|
||||||
|
|
||||||
|
var fileDescriptorDuration = []byte{
|
||||||
|
// 203 bytes of a gzipped FileDescriptorProto
|
||||||
|
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x4b, 0x29, 0x2d, 0x4a,
|
||||||
|
0x2c, 0xc9, 0xcc, 0xcf, 0xd3, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x4f, 0xcf, 0xcf, 0x4f,
|
||||||
|
0xcf, 0x49, 0x85, 0xf0, 0x92, 0x4a, 0xd3, 0x94, 0xac, 0xb8, 0x38, 0x5c, 0xa0, 0x4a, 0x84, 0x24,
|
||||||
|
0xb8, 0xd8, 0x8b, 0x53, 0x93, 0xf3, 0xf3, 0x52, 0x8a, 0x25, 0x18, 0x15, 0x18, 0x35, 0x98, 0x83,
|
||||||
|
0x60, 0x5c, 0x21, 0x11, 0x2e, 0xd6, 0xbc, 0xc4, 0xbc, 0xfc, 0x62, 0x09, 0x26, 0x05, 0x46, 0x0d,
|
||||||
|
0xd6, 0x20, 0x08, 0xc7, 0xa9, 0xfe, 0xc2, 0x43, 0x39, 0x86, 0x1b, 0x0f, 0xe5, 0x18, 0x3e, 0x3c,
|
||||||
|
0x94, 0x63, 0x5c, 0xf1, 0x48, 0x8e, 0xf1, 0xc4, 0x23, 0x39, 0xc6, 0x0b, 0x8f, 0xe4, 0x18, 0x1f,
|
||||||
|
0x3c, 0x92, 0x63, 0x7c, 0xf1, 0x48, 0x8e, 0xe1, 0xc3, 0x23, 0x39, 0xc6, 0x15, 0x8f, 0xe5, 0x18,
|
||||||
|
0xb9, 0x84, 0x93, 0xf3, 0x73, 0xf5, 0xd0, 0xac, 0x76, 0xe2, 0x85, 0x59, 0x1c, 0x00, 0x12, 0x09,
|
||||||
|
0x60, 0x8c, 0x62, 0x2d, 0xa9, 0x2c, 0x48, 0x2d, 0xfe, 0xc1, 0xc8, 0xb8, 0x88, 0x89, 0xd9, 0x3d,
|
||||||
|
0xc0, 0x69, 0x15, 0x93, 0x9c, 0x3b, 0x44, 0x4b, 0x00, 0x54, 0x8b, 0x5e, 0x78, 0x6a, 0x4e, 0x8e,
|
||||||
|
0x77, 0x5e, 0x7e, 0x79, 0x5e, 0x08, 0x48, 0x65, 0x12, 0x1b, 0xd8, 0x2c, 0x63, 0x40, 0x00, 0x00,
|
||||||
|
0x00, 0xff, 0xff, 0x9d, 0x5a, 0x25, 0xa5, 0xe6, 0x00, 0x00, 0x00,
|
||||||
|
}
|
100
vendor/github.com/gogo/protobuf/types/duration_gogo.go
generated
vendored
Normal file
|
@@ -0,0 +1,100 @@
// Protocol Buffers for Go with Gadgets
//
// Copyright (c) 2016, The GoGo Authors. All rights reserved.
// http://github.com/gogo/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

package types

import (
	"fmt"
	"time"
)

func NewPopulatedDuration(r interface {
	Int63() int64
}, easy bool) *Duration {
	this := &Duration{}
	maxSecs := time.Hour.Nanoseconds() / 1e9
	max := 2 * maxSecs
	s := int64(r.Int63()) % max
	s -= maxSecs
	neg := int64(1)
	if s < 0 {
		neg = -1
	}
	this.Seconds = s
	this.Nanos = int32(neg * (r.Int63() % 1e9))
	return this
}

func (d *Duration) String() string {
	td, err := DurationFromProto(d)
	if err != nil {
		return fmt.Sprintf("(%v)", err)
	}
	return td.String()
}

func NewPopulatedStdDuration(r interface {
	Int63() int64
}, easy bool) *time.Duration {
	dur := NewPopulatedDuration(r, easy)
	d, err := DurationFromProto(dur)
	if err != nil {
		return nil
	}
	return &d
}

func SizeOfStdDuration(d time.Duration) int {
	dur := DurationProto(d)
	return dur.Size()
}

func StdDurationMarshal(d time.Duration) ([]byte, error) {
	size := SizeOfStdDuration(d)
	buf := make([]byte, size)
	_, err := StdDurationMarshalTo(d, buf)
	return buf, err
}

func StdDurationMarshalTo(d time.Duration, data []byte) (int, error) {
	dur := DurationProto(d)
	return dur.MarshalTo(data)
}

func StdDurationUnmarshal(d *time.Duration, data []byte) error {
	dur := &Duration{}
	if err := dur.Unmarshal(data); err != nil {
		return err
	}
	dd, err := DurationFromProto(dur)
	if err != nil {
		return err
	}
	*d = dd
	return nil
}
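Again for orientation only (same assumed import path): the Std* helpers above let callers marshal a plain time.Duration through the Duration wire format without building the message themselves. A minimal sketch:

package main

import (
	"fmt"
	"time"

	"github.com/gogo/protobuf/types"
)

func main() {
	// Encode a time.Duration using the protobuf wire format of Duration.
	raw, err := types.StdDurationMarshal(1500 * time.Millisecond)
	if err != nil {
		panic(err)
	}

	// Decode it back into a time.Duration.
	var out time.Duration
	if err := types.StdDurationUnmarshal(&out, raw); err != nil {
		panic(err)
	}
	fmt.Println(out) // 1.5s
}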
|
457
vendor/github.com/gogo/protobuf/types/empty.pb.go
generated
vendored
Normal file
|
@ -0,0 +1,457 @@
|
||||||
|
// Code generated by protoc-gen-gogo.
|
||||||
|
// source: empty.proto
|
||||||
|
// DO NOT EDIT!
|
||||||
|
|
||||||
|
/*
|
||||||
|
Package types is a generated protocol buffer package.
|
||||||
|
|
||||||
|
It is generated from these files:
|
||||||
|
empty.proto
|
||||||
|
|
||||||
|
It has these top-level messages:
|
||||||
|
Empty
|
||||||
|
*/
|
||||||
|
package types
|
||||||
|
|
||||||
|
import proto "github.com/gogo/protobuf/proto"
|
||||||
|
import fmt "fmt"
|
||||||
|
import math "math"
|
||||||
|
|
||||||
|
import strings "strings"
|
||||||
|
import reflect "reflect"
|
||||||
|
|
||||||
|
import io "io"
|
||||||
|
|
||||||
|
// Reference imports to suppress errors if they are not otherwise used.
|
||||||
|
var _ = proto.Marshal
|
||||||
|
var _ = fmt.Errorf
|
||||||
|
var _ = math.Inf
|
||||||
|
|
||||||
|
// This is a compile-time assertion to ensure that this generated file
|
||||||
|
// is compatible with the proto package it is being compiled against.
|
||||||
|
// A compilation error at this line likely means your copy of the
|
||||||
|
// proto package needs to be updated.
|
||||||
|
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
|
||||||
|
|
||||||
|
// A generic empty message that you can re-use to avoid defining duplicated
|
||||||
|
// empty messages in your APIs. A typical example is to use it as the request
|
||||||
|
// or the response type of an API method. For instance:
|
||||||
|
//
|
||||||
|
// service Foo {
|
||||||
|
// rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// The JSON representation for `Empty` is empty JSON object `{}`.
|
||||||
|
type Empty struct {
|
||||||
|
}
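A side note, not part of the vendored file: because Empty has no fields, MarshalTo writes nothing and the encoded form is zero bytes. A minimal sketch, assuming the same import path as before:

package main

import (
	"fmt"

	"github.com/gogo/protobuf/types"
)

func main() {
	// Empty has no fields, so marshaling it yields an empty byte slice.
	raw, err := (&types.Empty{}).Marshal()
	if err != nil {
		panic(err)
	}
	fmt.Println(len(raw)) // 0
}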
|
||||||
|
|
||||||
|
func (m *Empty) Reset() { *m = Empty{} }
|
||||||
|
func (*Empty) ProtoMessage() {}
|
||||||
|
func (*Empty) Descriptor() ([]byte, []int) { return fileDescriptorEmpty, []int{0} }
|
||||||
|
func (*Empty) XXX_WellKnownType() string { return "Empty" }
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
proto.RegisterType((*Empty)(nil), "google.protobuf.Empty")
|
||||||
|
}
|
||||||
|
func (this *Empty) Compare(that interface{}) int {
|
||||||
|
if that == nil {
|
||||||
|
if this == nil {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
return 1
|
||||||
|
}
|
||||||
|
|
||||||
|
that1, ok := that.(*Empty)
|
||||||
|
if !ok {
|
||||||
|
that2, ok := that.(Empty)
|
||||||
|
if ok {
|
||||||
|
that1 = &that2
|
||||||
|
} else {
|
||||||
|
return 1
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if that1 == nil {
|
||||||
|
if this == nil {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
return 1
|
||||||
|
} else if this == nil {
|
||||||
|
return -1
|
||||||
|
}
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
func (this *Empty) Equal(that interface{}) bool {
|
||||||
|
if that == nil {
|
||||||
|
if this == nil {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
that1, ok := that.(*Empty)
|
||||||
|
if !ok {
|
||||||
|
that2, ok := that.(Empty)
|
||||||
|
if ok {
|
||||||
|
that1 = &that2
|
||||||
|
} else {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if that1 == nil {
|
||||||
|
if this == nil {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
} else if this == nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
func (this *Empty) GoString() string {
|
||||||
|
if this == nil {
|
||||||
|
return "nil"
|
||||||
|
}
|
||||||
|
s := make([]string, 0, 4)
|
||||||
|
s = append(s, "&types.Empty{")
|
||||||
|
s = append(s, "}")
|
||||||
|
return strings.Join(s, "")
|
||||||
|
}
|
||||||
|
func valueToGoStringEmpty(v interface{}, typ string) string {
|
||||||
|
rv := reflect.ValueOf(v)
|
||||||
|
if rv.IsNil() {
|
||||||
|
return "nil"
|
||||||
|
}
|
||||||
|
pv := reflect.Indirect(rv).Interface()
|
||||||
|
return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
|
||||||
|
}
|
||||||
|
func (m *Empty) Marshal() (dAtA []byte, err error) {
|
||||||
|
size := m.Size()
|
||||||
|
dAtA = make([]byte, size)
|
||||||
|
n, err := m.MarshalTo(dAtA)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return dAtA[:n], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Empty) MarshalTo(dAtA []byte) (int, error) {
|
||||||
|
var i int
|
||||||
|
_ = i
|
||||||
|
var l int
|
||||||
|
_ = l
|
||||||
|
return i, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func encodeFixed64Empty(dAtA []byte, offset int, v uint64) int {
|
||||||
|
dAtA[offset] = uint8(v)
|
||||||
|
dAtA[offset+1] = uint8(v >> 8)
|
||||||
|
dAtA[offset+2] = uint8(v >> 16)
|
||||||
|
dAtA[offset+3] = uint8(v >> 24)
|
||||||
|
dAtA[offset+4] = uint8(v >> 32)
|
||||||
|
dAtA[offset+5] = uint8(v >> 40)
|
||||||
|
dAtA[offset+6] = uint8(v >> 48)
|
||||||
|
dAtA[offset+7] = uint8(v >> 56)
|
||||||
|
return offset + 8
|
||||||
|
}
|
||||||
|
func encodeFixed32Empty(dAtA []byte, offset int, v uint32) int {
|
||||||
|
dAtA[offset] = uint8(v)
|
||||||
|
dAtA[offset+1] = uint8(v >> 8)
|
||||||
|
dAtA[offset+2] = uint8(v >> 16)
|
||||||
|
dAtA[offset+3] = uint8(v >> 24)
|
||||||
|
return offset + 4
|
||||||
|
}
|
||||||
|
func encodeVarintEmpty(dAtA []byte, offset int, v uint64) int {
|
||||||
|
for v >= 1<<7 {
|
||||||
|
dAtA[offset] = uint8(v&0x7f | 0x80)
|
||||||
|
v >>= 7
|
||||||
|
offset++
|
||||||
|
}
|
||||||
|
dAtA[offset] = uint8(v)
|
||||||
|
return offset + 1
|
||||||
|
}
|
||||||
|
func NewPopulatedEmpty(r randyEmpty, easy bool) *Empty {
|
||||||
|
this := &Empty{}
|
||||||
|
if !easy && r.Intn(10) != 0 {
|
||||||
|
}
|
||||||
|
return this
|
||||||
|
}
|
||||||
|
|
||||||
|
type randyEmpty interface {
|
||||||
|
Float32() float32
|
||||||
|
Float64() float64
|
||||||
|
Int63() int64
|
||||||
|
Int31() int32
|
||||||
|
Uint32() uint32
|
||||||
|
Intn(n int) int
|
||||||
|
}
|
||||||
|
|
||||||
|
func randUTF8RuneEmpty(r randyEmpty) rune {
|
||||||
|
ru := r.Intn(62)
|
||||||
|
if ru < 10 {
|
||||||
|
return rune(ru + 48)
|
||||||
|
} else if ru < 36 {
|
||||||
|
return rune(ru + 55)
|
||||||
|
}
|
||||||
|
return rune(ru + 61)
|
||||||
|
}
|
||||||
|
func randStringEmpty(r randyEmpty) string {
|
||||||
|
v1 := r.Intn(100)
|
||||||
|
tmps := make([]rune, v1)
|
||||||
|
for i := 0; i < v1; i++ {
|
||||||
|
tmps[i] = randUTF8RuneEmpty(r)
|
||||||
|
}
|
||||||
|
return string(tmps)
|
||||||
|
}
|
||||||
|
func randUnrecognizedEmpty(r randyEmpty, maxFieldNumber int) (dAtA []byte) {
|
||||||
|
l := r.Intn(5)
|
||||||
|
for i := 0; i < l; i++ {
|
||||||
|
wire := r.Intn(4)
|
||||||
|
if wire == 3 {
|
||||||
|
wire = 5
|
||||||
|
}
|
||||||
|
fieldNumber := maxFieldNumber + r.Intn(100)
|
||||||
|
dAtA = randFieldEmpty(dAtA, r, fieldNumber, wire)
|
||||||
|
}
|
||||||
|
return dAtA
|
||||||
|
}
|
||||||
|
func randFieldEmpty(dAtA []byte, r randyEmpty, fieldNumber int, wire int) []byte {
|
||||||
|
key := uint32(fieldNumber)<<3 | uint32(wire)
|
||||||
|
switch wire {
|
||||||
|
case 0:
|
||||||
|
dAtA = encodeVarintPopulateEmpty(dAtA, uint64(key))
|
||||||
|
v2 := r.Int63()
|
||||||
|
if r.Intn(2) == 0 {
|
||||||
|
v2 *= -1
|
||||||
|
}
|
||||||
|
dAtA = encodeVarintPopulateEmpty(dAtA, uint64(v2))
|
||||||
|
case 1:
|
||||||
|
dAtA = encodeVarintPopulateEmpty(dAtA, uint64(key))
|
||||||
|
dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)))
|
||||||
|
case 2:
|
||||||
|
dAtA = encodeVarintPopulateEmpty(dAtA, uint64(key))
|
||||||
|
ll := r.Intn(100)
|
||||||
|
dAtA = encodeVarintPopulateEmpty(dAtA, uint64(ll))
|
||||||
|
for j := 0; j < ll; j++ {
|
||||||
|
dAtA = append(dAtA, byte(r.Intn(256)))
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
dAtA = encodeVarintPopulateEmpty(dAtA, uint64(key))
|
||||||
|
dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)))
|
||||||
|
}
|
||||||
|
return dAtA
|
||||||
|
}
|
||||||
|
func encodeVarintPopulateEmpty(dAtA []byte, v uint64) []byte {
|
||||||
|
for v >= 1<<7 {
|
||||||
|
dAtA = append(dAtA, uint8(uint64(v)&0x7f|0x80))
|
||||||
|
v >>= 7
|
||||||
|
}
|
||||||
|
dAtA = append(dAtA, uint8(v))
|
||||||
|
return dAtA
|
||||||
|
}
|
||||||
|
func (m *Empty) Size() (n int) {
|
||||||
|
var l int
|
||||||
|
_ = l
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
|
||||||
|
func sovEmpty(x uint64) (n int) {
|
||||||
|
for {
|
||||||
|
n++
|
||||||
|
x >>= 7
|
||||||
|
if x == 0 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
func sozEmpty(x uint64) (n int) {
|
||||||
|
return sovEmpty(uint64((x << 1) ^ uint64((int64(x) >> 63))))
|
||||||
|
}
|
||||||
|
func (this *Empty) String() string {
|
||||||
|
if this == nil {
|
||||||
|
return "nil"
|
||||||
|
}
|
||||||
|
s := strings.Join([]string{`&Empty{`,
|
||||||
|
`}`,
|
||||||
|
}, "")
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
func valueToStringEmpty(v interface{}) string {
|
||||||
|
rv := reflect.ValueOf(v)
|
||||||
|
if rv.IsNil() {
|
||||||
|
return "nil"
|
||||||
|
}
|
||||||
|
pv := reflect.Indirect(rv).Interface()
|
||||||
|
return fmt.Sprintf("*%v", pv)
|
||||||
|
}
|
||||||
|
func (m *Empty) Unmarshal(dAtA []byte) error {
|
||||||
|
l := len(dAtA)
|
||||||
|
iNdEx := 0
|
||||||
|
for iNdEx < l {
|
||||||
|
preIndex := iNdEx
|
||||||
|
var wire uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowEmpty
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
wire |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
fieldNum := int32(wire >> 3)
|
||||||
|
wireType := int(wire & 0x7)
|
||||||
|
if wireType == 4 {
|
||||||
|
return fmt.Errorf("proto: Empty: wiretype end group for non-group")
|
||||||
|
}
|
||||||
|
if fieldNum <= 0 {
|
||||||
|
return fmt.Errorf("proto: Empty: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||||
|
}
|
||||||
|
switch fieldNum {
|
||||||
|
default:
|
||||||
|
iNdEx = preIndex
|
||||||
|
skippy, err := skipEmpty(dAtA[iNdEx:])
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if skippy < 0 {
|
||||||
|
return ErrInvalidLengthEmpty
|
||||||
|
}
|
||||||
|
if (iNdEx + skippy) > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
iNdEx += skippy
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if iNdEx > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
func skipEmpty(dAtA []byte) (n int, err error) {
|
||||||
|
l := len(dAtA)
|
||||||
|
iNdEx := 0
|
||||||
|
for iNdEx < l {
|
||||||
|
var wire uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return 0, ErrIntOverflowEmpty
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
wire |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
wireType := int(wire & 0x7)
|
||||||
|
switch wireType {
|
||||||
|
case 0:
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return 0, ErrIntOverflowEmpty
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
iNdEx++
|
||||||
|
if dAtA[iNdEx-1] < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return iNdEx, nil
|
||||||
|
case 1:
|
||||||
|
iNdEx += 8
|
||||||
|
return iNdEx, nil
|
||||||
|
case 2:
|
||||||
|
var length int
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return 0, ErrIntOverflowEmpty
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
length |= (int(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
iNdEx += length
|
||||||
|
if length < 0 {
|
||||||
|
return 0, ErrInvalidLengthEmpty
|
||||||
|
}
|
||||||
|
return iNdEx, nil
|
||||||
|
case 3:
|
||||||
|
for {
|
||||||
|
var innerWire uint64
|
||||||
|
var start int = iNdEx
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return 0, ErrIntOverflowEmpty
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
innerWire |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
innerWireType := int(innerWire & 0x7)
|
||||||
|
if innerWireType == 4 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
next, err := skipEmpty(dAtA[start:])
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
iNdEx = start + next
|
||||||
|
}
|
||||||
|
return iNdEx, nil
|
||||||
|
case 4:
|
||||||
|
return iNdEx, nil
|
||||||
|
case 5:
|
||||||
|
iNdEx += 4
|
||||||
|
return iNdEx, nil
|
||||||
|
default:
|
||||||
|
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
panic("unreachable")
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
ErrInvalidLengthEmpty = fmt.Errorf("proto: negative length found during unmarshaling")
|
||||||
|
ErrIntOverflowEmpty = fmt.Errorf("proto: integer overflow")
|
||||||
|
)
|
||||||
|
|
||||||
|
func init() { proto.RegisterFile("empty.proto", fileDescriptorEmpty) }
|
||||||
|
|
||||||
|
var fileDescriptorEmpty = []byte{
|
||||||
|
// 169 bytes of a gzipped FileDescriptorProto
|
||||||
|
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x4e, 0xcd, 0x2d, 0x28,
|
||||||
|
0xa9, 0xd4, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x4f, 0xcf, 0xcf, 0x4f, 0xcf, 0x49, 0x85,
|
||||||
|
0xf0, 0x92, 0x4a, 0xd3, 0x94, 0xd8, 0xb9, 0x58, 0x5d, 0x41, 0xf2, 0x4e, 0x2d, 0x8c, 0x17, 0x1e,
|
||||||
|
0xca, 0x31, 0xdc, 0x78, 0x28, 0xc7, 0xf0, 0xe1, 0xa1, 0x1c, 0xe3, 0x8f, 0x87, 0x72, 0x8c, 0x0d,
|
||||||
|
0x8f, 0xe4, 0x18, 0x57, 0x3c, 0x92, 0x63, 0x3c, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6,
|
||||||
|
0x07, 0x8f, 0xe4, 0x18, 0x5f, 0x3c, 0x92, 0x63, 0xf8, 0x00, 0x12, 0x7f, 0x2c, 0xc7, 0xc8, 0x25,
|
||||||
|
0x9c, 0x9c, 0x9f, 0xab, 0x87, 0x66, 0xa0, 0x13, 0x17, 0xd8, 0xb8, 0x00, 0x10, 0x37, 0x80, 0x31,
|
||||||
|
0x8a, 0xb5, 0xa4, 0xb2, 0x20, 0xb5, 0xf8, 0x07, 0x23, 0xe3, 0x22, 0x26, 0x66, 0xf7, 0x00, 0xa7,
|
||||||
|
0x55, 0x4c, 0x72, 0xee, 0x10, 0xf5, 0x01, 0x50, 0xf5, 0x7a, 0xe1, 0xa9, 0x39, 0x39, 0xde, 0x79,
|
||||||
|
0xf9, 0xe5, 0x79, 0x21, 0x20, 0x95, 0x49, 0x6c, 0x60, 0x83, 0x8c, 0x01, 0x01, 0x00, 0x00, 0xff,
|
||||||
|
0xff, 0x7c, 0xa8, 0xf0, 0xc4, 0xb6, 0x00, 0x00, 0x00,
|
||||||
|
}
|
738
vendor/github.com/gogo/protobuf/types/field_mask.pb.go
generated
vendored
Normal file
|
@ -0,0 +1,738 @@
|
||||||
|
// Code generated by protoc-gen-gogo.
|
||||||
|
// source: field_mask.proto
|
||||||
|
// DO NOT EDIT!
|
||||||
|
|
||||||
|
/*
|
||||||
|
Package types is a generated protocol buffer package.
|
||||||
|
|
||||||
|
It is generated from these files:
|
||||||
|
field_mask.proto
|
||||||
|
|
||||||
|
It has these top-level messages:
|
||||||
|
FieldMask
|
||||||
|
*/
|
||||||
|
package types
|
||||||
|
|
||||||
|
import proto "github.com/gogo/protobuf/proto"
|
||||||
|
import fmt "fmt"
|
||||||
|
import math "math"
|
||||||
|
|
||||||
|
import strings "strings"
|
||||||
|
import reflect "reflect"
|
||||||
|
|
||||||
|
import io "io"
|
||||||
|
|
||||||
|
// Reference imports to suppress errors if they are not otherwise used.
|
||||||
|
var _ = proto.Marshal
|
||||||
|
var _ = fmt.Errorf
|
||||||
|
var _ = math.Inf
|
||||||
|
|
||||||
|
// This is a compile-time assertion to ensure that this generated file
|
||||||
|
// is compatible with the proto package it is being compiled against.
|
||||||
|
// A compilation error at this line likely means your copy of the
|
||||||
|
// proto package needs to be updated.
|
||||||
|
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
|
||||||
|
|
||||||
|
// `FieldMask` represents a set of symbolic field paths, for example:
|
||||||
|
//
|
||||||
|
// paths: "f.a"
|
||||||
|
// paths: "f.b.d"
|
||||||
|
//
|
||||||
|
// Here `f` represents a field in some root message, `a` and `b`
|
||||||
|
// fields in the message found in `f`, and `d` a field found in the
|
||||||
|
// message in `f.b`.
|
||||||
|
//
|
||||||
|
// Field masks are used to specify a subset of fields that should be
|
||||||
|
// returned by a get operation or modified by an update operation.
|
||||||
|
// Field masks also have a custom JSON encoding (see below).
|
||||||
|
//
|
||||||
|
// # Field Masks in Projections
|
||||||
|
//
|
||||||
|
// When used in the context of a projection, a response message or
|
||||||
|
// sub-message is filtered by the API to only contain those fields as
|
||||||
|
// specified in the mask. For example, if the mask in the previous
|
||||||
|
// example is applied to a response message as follows:
|
||||||
|
//
|
||||||
|
// f {
|
||||||
|
// a : 22
|
||||||
|
// b {
|
||||||
|
// d : 1
|
||||||
|
// x : 2
|
||||||
|
// }
|
||||||
|
// y : 13
|
||||||
|
// }
|
||||||
|
// z: 8
|
||||||
|
//
|
||||||
|
// The result will not contain specific values for fields x,y and z
|
||||||
|
// (their value will be set to the default, and omitted in proto text
|
||||||
|
// output):
|
||||||
|
//
|
||||||
|
//
|
||||||
|
// f {
|
||||||
|
// a : 22
|
||||||
|
// b {
|
||||||
|
// d : 1
|
||||||
|
// }
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// A repeated field is not allowed except at the last position of a
|
||||||
|
// paths string.
|
||||||
|
//
|
||||||
|
// If a FieldMask object is not present in a get operation, the
|
||||||
|
// operation applies to all fields (as if a FieldMask of all fields
|
||||||
|
// had been specified).
|
||||||
|
//
|
||||||
|
// Note that a field mask does not necessarily apply to the
|
||||||
|
// top-level response message. In case of a REST get operation, the
|
||||||
|
// field mask applies directly to the response, but in case of a REST
|
||||||
|
// list operation, the mask instead applies to each individual message
|
||||||
|
// in the returned resource list. In case of a REST custom method,
|
||||||
|
// other definitions may be used. Where the mask applies will be
|
||||||
|
// clearly documented together with its declaration in the API. In
|
||||||
|
// any case, the effect on the returned resource/resources is required
|
||||||
|
// behavior for APIs.
|
||||||
|
//
|
||||||
|
// # Field Masks in Update Operations
|
||||||
|
//
|
||||||
|
// A field mask in update operations specifies which fields of the
|
||||||
|
// targeted resource are going to be updated. The API is required
|
||||||
|
// to only change the values of the fields as specified in the mask
|
||||||
|
// and leave the others untouched. If a resource is passed in to
|
||||||
|
// describe the updated values, the API ignores the values of all
|
||||||
|
// fields not covered by the mask.
|
||||||
|
//
|
||||||
|
// If a repeated field is specified for an update operation, the existing
|
||||||
|
// repeated values in the target resource will be overwritten by the new values.
|
||||||
|
// Note that a repeated field is only allowed in the last position of a `paths`
|
||||||
|
// string.
|
||||||
|
//
|
||||||
|
// If a sub-message is specified in the last position of the field mask for an
|
||||||
|
// update operation, then the existing sub-message in the target resource is
|
||||||
|
// overwritten. Given the target message:
|
||||||
|
//
|
||||||
|
// f {
|
||||||
|
// b {
|
||||||
|
// d : 1
|
||||||
|
// x : 2
|
||||||
|
// }
|
||||||
|
// c : 1
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// And an update message:
|
||||||
|
//
|
||||||
|
// f {
|
||||||
|
// b {
|
||||||
|
// d : 10
|
||||||
|
// }
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// then if the field mask is:
|
||||||
|
//
|
||||||
|
// paths: "f.b"
|
||||||
|
//
|
||||||
|
// then the result will be:
|
||||||
|
//
|
||||||
|
// f {
|
||||||
|
// b {
|
||||||
|
// d : 10
|
||||||
|
// }
|
||||||
|
// c : 1
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// However, if the update mask was:
|
||||||
|
//
|
||||||
|
// paths: "f.b.d"
|
||||||
|
//
|
||||||
|
// then the result would be:
|
||||||
|
//
|
||||||
|
// f {
|
||||||
|
// b {
|
||||||
|
// d : 10
|
||||||
|
// x : 2
|
||||||
|
// }
|
||||||
|
// c : 1
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// In order to reset a field's value to the default, the field must
|
||||||
|
// be in the mask and set to the default value in the provided resource.
|
||||||
|
// Hence, in order to reset all fields of a resource, provide a default
|
||||||
|
// instance of the resource and set all fields in the mask, or do
|
||||||
|
// not provide a mask as described below.
|
||||||
|
//
|
||||||
|
// If a field mask is not present on update, the operation applies to
|
||||||
|
// all fields (as if a field mask of all fields has been specified).
|
||||||
|
// Note that in the presence of schema evolution, this may mean that
|
||||||
|
// fields the client does not know and has therefore not filled into
|
||||||
|
// the request will be reset to their default. If this is unwanted
|
||||||
|
// behavior, a specific service may require a client to always specify
|
||||||
|
// a field mask, producing an error if not.
|
||||||
|
//
|
||||||
|
// As with get operations, the location of the resource which
|
||||||
|
// describes the updated values in the request message depends on the
|
||||||
|
// operation kind. In any case, the effect of the field mask is
|
||||||
|
// required to be honored by the API.
|
||||||
|
//
|
||||||
|
// ## Considerations for HTTP REST
|
||||||
|
//
|
||||||
|
// The HTTP kind of an update operation which uses a field mask must
|
||||||
|
// be set to PATCH instead of PUT in order to satisfy HTTP semantics
|
||||||
|
// (PUT must only be used for full updates).
|
||||||
|
//
|
||||||
|
// # JSON Encoding of Field Masks
|
||||||
|
//
|
||||||
|
// In JSON, a field mask is encoded as a single string where paths are
|
||||||
|
// separated by a comma. Fields name in each path are converted
|
||||||
|
// to/from lower-camel naming conventions.
|
||||||
|
//
|
||||||
|
// As an example, consider the following message declarations:
|
||||||
|
//
|
||||||
|
// message Profile {
|
||||||
|
// User user = 1;
|
||||||
|
// Photo photo = 2;
|
||||||
|
// }
|
||||||
|
// message User {
|
||||||
|
// string display_name = 1;
|
||||||
|
// string address = 2;
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// In proto a field mask for `Profile` may look as such:
|
||||||
|
//
|
||||||
|
// mask {
|
||||||
|
// paths: "user.display_name"
|
||||||
|
// paths: "photo"
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// In JSON, the same mask is represented as below:
|
||||||
|
//
|
||||||
|
// {
|
||||||
|
// mask: "user.displayName,photo"
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// # Field Masks and Oneof Fields
|
||||||
|
//
|
||||||
|
// Field masks treat fields in oneofs just as regular fields. Consider the
|
||||||
|
// following message:
|
||||||
|
//
|
||||||
|
// message SampleMessage {
|
||||||
|
// oneof test_oneof {
|
||||||
|
// string name = 4;
|
||||||
|
// SubMessage sub_message = 9;
|
||||||
|
// }
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// The field mask can be:
|
||||||
|
//
|
||||||
|
// mask {
|
||||||
|
// paths: "name"
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// Or:
|
||||||
|
//
|
||||||
|
// mask {
|
||||||
|
// paths: "sub_message"
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// Note that oneof type names ("test_oneof" in this case) cannot be used in
|
||||||
|
// paths.
|
||||||
|
type FieldMask struct {
|
||||||
|
// The set of field mask paths.
|
||||||
|
Paths []string `protobuf:"bytes,1,rep,name=paths" json:"paths,omitempty"`
|
||||||
|
}
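For orientation only (illustrative, same assumed import path): building the mask from the Profile/User example in the comment above and marshaling it; each path is written as a length-delimited string field with tag 1, as the MarshalTo method below shows.

package main

import (
	"fmt"

	"github.com/gogo/protobuf/types"
)

func main() {
	// A mask selecting the user's display name and the whole photo field,
	// mirroring the Profile/User example in the doc comment.
	mask := &types.FieldMask{Paths: []string{"user.display_name", "photo"}}

	raw, err := mask.Marshal()
	if err != nil {
		panic(err)
	}
	fmt.Println(len(raw), mask.Paths)
}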
|
||||||
|
|
||||||
|
func (m *FieldMask) Reset() { *m = FieldMask{} }
|
||||||
|
func (*FieldMask) ProtoMessage() {}
|
||||||
|
func (*FieldMask) Descriptor() ([]byte, []int) { return fileDescriptorFieldMask, []int{0} }
|
||||||
|
|
||||||
|
func (m *FieldMask) GetPaths() []string {
|
||||||
|
if m != nil {
|
||||||
|
return m.Paths
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
proto.RegisterType((*FieldMask)(nil), "google.protobuf.FieldMask")
|
||||||
|
}
|
||||||
|
func (this *FieldMask) Compare(that interface{}) int {
|
||||||
|
if that == nil {
|
||||||
|
if this == nil {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
return 1
|
||||||
|
}
|
||||||
|
|
||||||
|
that1, ok := that.(*FieldMask)
|
||||||
|
if !ok {
|
||||||
|
that2, ok := that.(FieldMask)
|
||||||
|
if ok {
|
||||||
|
that1 = &that2
|
||||||
|
} else {
|
||||||
|
return 1
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if that1 == nil {
|
||||||
|
if this == nil {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
return 1
|
||||||
|
} else if this == nil {
|
||||||
|
return -1
|
||||||
|
}
|
||||||
|
if len(this.Paths) != len(that1.Paths) {
|
||||||
|
if len(this.Paths) < len(that1.Paths) {
|
||||||
|
return -1
|
||||||
|
}
|
||||||
|
return 1
|
||||||
|
}
|
||||||
|
for i := range this.Paths {
|
||||||
|
if this.Paths[i] != that1.Paths[i] {
|
||||||
|
if this.Paths[i] < that1.Paths[i] {
|
||||||
|
return -1
|
||||||
|
}
|
||||||
|
return 1
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
func (this *FieldMask) Equal(that interface{}) bool {
|
||||||
|
if that == nil {
|
||||||
|
if this == nil {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
that1, ok := that.(*FieldMask)
|
||||||
|
if !ok {
|
||||||
|
that2, ok := that.(FieldMask)
|
||||||
|
if ok {
|
||||||
|
that1 = &that2
|
||||||
|
} else {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if that1 == nil {
|
||||||
|
if this == nil {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
} else if this == nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if len(this.Paths) != len(that1.Paths) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
for i := range this.Paths {
|
||||||
|
if this.Paths[i] != that1.Paths[i] {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
func (this *FieldMask) GoString() string {
|
||||||
|
if this == nil {
|
||||||
|
return "nil"
|
||||||
|
}
|
||||||
|
s := make([]string, 0, 5)
|
||||||
|
s = append(s, "&types.FieldMask{")
|
||||||
|
s = append(s, "Paths: "+fmt.Sprintf("%#v", this.Paths)+",\n")
|
||||||
|
s = append(s, "}")
|
||||||
|
return strings.Join(s, "")
|
||||||
|
}
|
||||||
|
func valueToGoStringFieldMask(v interface{}, typ string) string {
|
||||||
|
rv := reflect.ValueOf(v)
|
||||||
|
if rv.IsNil() {
|
||||||
|
return "nil"
|
||||||
|
}
|
||||||
|
pv := reflect.Indirect(rv).Interface()
|
||||||
|
return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
|
||||||
|
}
|
||||||
|
func (m *FieldMask) Marshal() (dAtA []byte, err error) {
|
||||||
|
size := m.Size()
|
||||||
|
dAtA = make([]byte, size)
|
||||||
|
n, err := m.MarshalTo(dAtA)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return dAtA[:n], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *FieldMask) MarshalTo(dAtA []byte) (int, error) {
|
||||||
|
var i int
|
||||||
|
_ = i
|
||||||
|
var l int
|
||||||
|
_ = l
|
||||||
|
if len(m.Paths) > 0 {
|
||||||
|
for _, s := range m.Paths {
|
||||||
|
dAtA[i] = 0xa
|
||||||
|
i++
|
||||||
|
l = len(s)
|
||||||
|
for l >= 1<<7 {
|
||||||
|
dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
|
||||||
|
l >>= 7
|
||||||
|
i++
|
||||||
|
}
|
||||||
|
dAtA[i] = uint8(l)
|
||||||
|
i++
|
||||||
|
i += copy(dAtA[i:], s)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return i, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func encodeFixed64FieldMask(dAtA []byte, offset int, v uint64) int {
|
||||||
|
dAtA[offset] = uint8(v)
|
||||||
|
dAtA[offset+1] = uint8(v >> 8)
|
||||||
|
dAtA[offset+2] = uint8(v >> 16)
|
||||||
|
dAtA[offset+3] = uint8(v >> 24)
|
||||||
|
dAtA[offset+4] = uint8(v >> 32)
|
||||||
|
dAtA[offset+5] = uint8(v >> 40)
|
||||||
|
dAtA[offset+6] = uint8(v >> 48)
|
||||||
|
dAtA[offset+7] = uint8(v >> 56)
|
||||||
|
return offset + 8
|
||||||
|
}
|
||||||
|
func encodeFixed32FieldMask(dAtA []byte, offset int, v uint32) int {
|
||||||
|
dAtA[offset] = uint8(v)
|
||||||
|
dAtA[offset+1] = uint8(v >> 8)
|
||||||
|
dAtA[offset+2] = uint8(v >> 16)
|
||||||
|
dAtA[offset+3] = uint8(v >> 24)
|
||||||
|
return offset + 4
|
||||||
|
}
|
||||||
|
func encodeVarintFieldMask(dAtA []byte, offset int, v uint64) int {
|
||||||
|
for v >= 1<<7 {
|
||||||
|
dAtA[offset] = uint8(v&0x7f | 0x80)
|
||||||
|
v >>= 7
|
||||||
|
offset++
|
||||||
|
}
|
||||||
|
dAtA[offset] = uint8(v)
|
||||||
|
return offset + 1
|
||||||
|
}
|
||||||
|
func NewPopulatedFieldMask(r randyFieldMask, easy bool) *FieldMask {
|
||||||
|
this := &FieldMask{}
|
||||||
|
v1 := r.Intn(10)
|
||||||
|
this.Paths = make([]string, v1)
|
||||||
|
for i := 0; i < v1; i++ {
|
||||||
|
this.Paths[i] = string(randStringFieldMask(r))
|
||||||
|
}
|
||||||
|
if !easy && r.Intn(10) != 0 {
|
||||||
|
}
|
||||||
|
return this
|
||||||
|
}
|
||||||
|
|
||||||
|
type randyFieldMask interface {
|
||||||
|
Float32() float32
|
||||||
|
Float64() float64
|
||||||
|
Int63() int64
|
||||||
|
Int31() int32
|
||||||
|
Uint32() uint32
|
||||||
|
Intn(n int) int
|
||||||
|
}
|
||||||
|
|
||||||
|
func randUTF8RuneFieldMask(r randyFieldMask) rune {
|
||||||
|
ru := r.Intn(62)
|
||||||
|
if ru < 10 {
|
||||||
|
return rune(ru + 48)
|
||||||
|
} else if ru < 36 {
|
||||||
|
return rune(ru + 55)
|
||||||
|
}
|
||||||
|
return rune(ru + 61)
|
||||||
|
}
|
||||||
|
func randStringFieldMask(r randyFieldMask) string {
|
||||||
|
v2 := r.Intn(100)
|
||||||
|
tmps := make([]rune, v2)
|
||||||
|
for i := 0; i < v2; i++ {
|
||||||
|
tmps[i] = randUTF8RuneFieldMask(r)
|
||||||
|
}
|
||||||
|
return string(tmps)
|
||||||
|
}
|
||||||
|
func randUnrecognizedFieldMask(r randyFieldMask, maxFieldNumber int) (dAtA []byte) {
|
||||||
|
l := r.Intn(5)
|
||||||
|
for i := 0; i < l; i++ {
|
||||||
|
wire := r.Intn(4)
|
||||||
|
if wire == 3 {
|
||||||
|
wire = 5
|
||||||
|
}
|
||||||
|
fieldNumber := maxFieldNumber + r.Intn(100)
|
||||||
|
dAtA = randFieldFieldMask(dAtA, r, fieldNumber, wire)
|
||||||
|
}
|
||||||
|
return dAtA
|
||||||
|
}
|
||||||
|
func randFieldFieldMask(dAtA []byte, r randyFieldMask, fieldNumber int, wire int) []byte {
|
||||||
|
key := uint32(fieldNumber)<<3 | uint32(wire)
|
||||||
|
switch wire {
|
||||||
|
case 0:
|
||||||
|
dAtA = encodeVarintPopulateFieldMask(dAtA, uint64(key))
|
||||||
|
v3 := r.Int63()
|
||||||
|
if r.Intn(2) == 0 {
|
||||||
|
v3 *= -1
|
||||||
|
}
|
||||||
|
dAtA = encodeVarintPopulateFieldMask(dAtA, uint64(v3))
|
||||||
|
case 1:
|
||||||
|
dAtA = encodeVarintPopulateFieldMask(dAtA, uint64(key))
|
||||||
|
dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)))
|
||||||
|
case 2:
|
||||||
|
dAtA = encodeVarintPopulateFieldMask(dAtA, uint64(key))
|
||||||
|
ll := r.Intn(100)
|
||||||
|
dAtA = encodeVarintPopulateFieldMask(dAtA, uint64(ll))
|
||||||
|
for j := 0; j < ll; j++ {
|
||||||
|
dAtA = append(dAtA, byte(r.Intn(256)))
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
dAtA = encodeVarintPopulateFieldMask(dAtA, uint64(key))
|
||||||
|
dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)))
|
||||||
|
}
|
||||||
|
return dAtA
|
||||||
|
}
|
||||||
|
func encodeVarintPopulateFieldMask(dAtA []byte, v uint64) []byte {
|
||||||
|
for v >= 1<<7 {
|
||||||
|
dAtA = append(dAtA, uint8(uint64(v)&0x7f|0x80))
|
||||||
|
v >>= 7
|
||||||
|
}
|
||||||
|
dAtA = append(dAtA, uint8(v))
|
||||||
|
return dAtA
|
||||||
|
}
|
||||||
|
func (m *FieldMask) Size() (n int) {
|
||||||
|
var l int
|
||||||
|
_ = l
|
||||||
|
if len(m.Paths) > 0 {
|
||||||
|
for _, s := range m.Paths {
|
||||||
|
l = len(s)
|
||||||
|
n += 1 + l + sovFieldMask(uint64(l))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
|
||||||
|
func sovFieldMask(x uint64) (n int) {
|
||||||
|
for {
|
||||||
|
n++
|
||||||
|
x >>= 7
|
||||||
|
if x == 0 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
func sozFieldMask(x uint64) (n int) {
|
||||||
|
return sovFieldMask(uint64((x << 1) ^ uint64((int64(x) >> 63))))
|
||||||
|
}
|
||||||
|
func (this *FieldMask) String() string {
|
||||||
|
if this == nil {
|
||||||
|
return "nil"
|
||||||
|
}
|
||||||
|
s := strings.Join([]string{`&FieldMask{`,
|
||||||
|
`Paths:` + fmt.Sprintf("%v", this.Paths) + `,`,
|
||||||
|
`}`,
|
||||||
|
}, "")
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
func valueToStringFieldMask(v interface{}) string {
|
||||||
|
rv := reflect.ValueOf(v)
|
||||||
|
if rv.IsNil() {
|
||||||
|
return "nil"
|
||||||
|
}
|
||||||
|
pv := reflect.Indirect(rv).Interface()
|
||||||
|
return fmt.Sprintf("*%v", pv)
|
||||||
|
}
|
||||||
|
func (m *FieldMask) Unmarshal(dAtA []byte) error {
|
||||||
|
l := len(dAtA)
|
||||||
|
iNdEx := 0
|
||||||
|
for iNdEx < l {
|
||||||
|
preIndex := iNdEx
|
||||||
|
var wire uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowFieldMask
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
wire |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
fieldNum := int32(wire >> 3)
|
||||||
|
wireType := int(wire & 0x7)
|
||||||
|
if wireType == 4 {
|
||||||
|
return fmt.Errorf("proto: FieldMask: wiretype end group for non-group")
|
||||||
|
}
|
||||||
|
if fieldNum <= 0 {
|
||||||
|
return fmt.Errorf("proto: FieldMask: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||||
|
}
|
||||||
|
switch fieldNum {
|
||||||
|
case 1:
|
||||||
|
if wireType != 2 {
|
||||||
|
return fmt.Errorf("proto: wrong wireType = %d for field Paths", wireType)
|
||||||
|
}
|
||||||
|
var stringLen uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowFieldMask
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
stringLen |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
intStringLen := int(stringLen)
|
||||||
|
if intStringLen < 0 {
|
||||||
|
return ErrInvalidLengthFieldMask
|
||||||
|
}
|
||||||
|
postIndex := iNdEx + intStringLen
|
||||||
|
if postIndex > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
m.Paths = append(m.Paths, string(dAtA[iNdEx:postIndex]))
|
||||||
|
iNdEx = postIndex
|
||||||
|
default:
|
||||||
|
iNdEx = preIndex
|
||||||
|
skippy, err := skipFieldMask(dAtA[iNdEx:])
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if skippy < 0 {
|
||||||
|
return ErrInvalidLengthFieldMask
|
||||||
|
}
|
||||||
|
if (iNdEx + skippy) > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
iNdEx += skippy
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if iNdEx > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
func skipFieldMask(dAtA []byte) (n int, err error) {
|
||||||
|
l := len(dAtA)
|
||||||
|
iNdEx := 0
|
||||||
|
for iNdEx < l {
|
||||||
|
var wire uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return 0, ErrIntOverflowFieldMask
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
wire |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
wireType := int(wire & 0x7)
|
||||||
|
switch wireType {
|
||||||
|
case 0:
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return 0, ErrIntOverflowFieldMask
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
iNdEx++
|
||||||
|
if dAtA[iNdEx-1] < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return iNdEx, nil
|
||||||
|
case 1:
|
||||||
|
iNdEx += 8
|
||||||
|
return iNdEx, nil
|
||||||
|
case 2:
|
||||||
|
var length int
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return 0, ErrIntOverflowFieldMask
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
length |= (int(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
iNdEx += length
|
||||||
|
if length < 0 {
|
||||||
|
return 0, ErrInvalidLengthFieldMask
|
||||||
|
}
|
||||||
|
return iNdEx, nil
|
||||||
|
case 3:
|
||||||
|
for {
|
||||||
|
var innerWire uint64
|
||||||
|
var start int = iNdEx
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return 0, ErrIntOverflowFieldMask
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
innerWire |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
innerWireType := int(innerWire & 0x7)
|
||||||
|
if innerWireType == 4 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
next, err := skipFieldMask(dAtA[start:])
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
iNdEx = start + next
|
||||||
|
}
|
||||||
|
return iNdEx, nil
|
||||||
|
case 4:
|
||||||
|
return iNdEx, nil
|
||||||
|
case 5:
|
||||||
|
iNdEx += 4
|
||||||
|
return iNdEx, nil
|
||||||
|
default:
|
||||||
|
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
panic("unreachable")
|
||||||
|
}
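The skip helper above advances past unknown fields by reading base-128 varints for the tag and any length prefix. As a minimal standalone sketch of that decoding loop (illustrative only, not part of the vendored file; the helper name is made up):

package main

import "fmt"

// decodeVarint reads one base-128 varint from buf and returns the value and
// the number of bytes consumed (0 if the buffer ends mid-varint).
func decodeVarint(buf []byte) (uint64, int) {
	var v uint64
	for i, b := range buf {
		v |= uint64(b&0x7F) << (7 * uint(i))
		if b < 0x80 { // high bit clear: last byte of the varint
			return v, i + 1
		}
	}
	return 0, 0
}

func main() {
	v, n := decodeVarint([]byte{0xAC, 0x02}) // 300 encoded on the wire
	fmt.Println(v, n)                        // 300 2
}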
|
||||||
|
|
||||||
|
var (
|
||||||
|
ErrInvalidLengthFieldMask = fmt.Errorf("proto: negative length found during unmarshaling")
|
||||||
|
ErrIntOverflowFieldMask = fmt.Errorf("proto: integer overflow")
|
||||||
|
)
|
||||||
|
|
||||||
|
func init() { proto.RegisterFile("field_mask.proto", fileDescriptorFieldMask) }
|
||||||
|
|
||||||
|
var fileDescriptorFieldMask = []byte{
|
||||||
|
// 193 bytes of a gzipped FileDescriptorProto
|
||||||
|
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x48, 0xcb, 0x4c, 0xcd,
|
||||||
|
0x49, 0x89, 0xcf, 0x4d, 0x2c, 0xce, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x4f, 0xcf,
|
||||||
|
0xcf, 0x4f, 0xcf, 0x49, 0x85, 0xf0, 0x92, 0x4a, 0xd3, 0x94, 0x14, 0xb9, 0x38, 0xdd, 0x40, 0x8a,
|
||||||
|
0x7c, 0x13, 0x8b, 0xb3, 0x85, 0x44, 0xb8, 0x58, 0x0b, 0x12, 0x4b, 0x32, 0x8a, 0x25, 0x18, 0x15,
|
||||||
|
0x98, 0x35, 0x38, 0x83, 0x20, 0x1c, 0xa7, 0x56, 0xc6, 0x0b, 0x0f, 0xe5, 0x18, 0x6e, 0x3c, 0x94,
|
||||||
|
0x63, 0xf8, 0xf0, 0x50, 0x8e, 0xf1, 0xc7, 0x43, 0x39, 0xc6, 0x86, 0x47, 0x72, 0x8c, 0x2b, 0x1e,
|
||||||
|
0xc9, 0x31, 0x9e, 0x78, 0x24, 0xc7, 0x78, 0xe1, 0x91, 0x1c, 0xe3, 0x83, 0x47, 0x72, 0x8c, 0x2f,
|
||||||
|
0x1e, 0xc9, 0x31, 0x7c, 0x00, 0x89, 0x3f, 0x96, 0x63, 0xe4, 0x12, 0x4e, 0xce, 0xcf, 0xd5, 0x43,
|
||||||
|
0xb3, 0xca, 0x89, 0x0f, 0x6e, 0x51, 0x00, 0x48, 0x28, 0x80, 0x31, 0x8a, 0xb5, 0xa4, 0xb2, 0x20,
|
||||||
|
0xb5, 0x78, 0x11, 0x13, 0xb3, 0x7b, 0x80, 0xd3, 0x2a, 0x26, 0x39, 0x77, 0x88, 0x86, 0x00, 0xa8,
|
||||||
|
0x06, 0xbd, 0xf0, 0xd4, 0x9c, 0x1c, 0xef, 0xbc, 0xfc, 0xf2, 0xbc, 0x10, 0x90, 0xb2, 0x24, 0x36,
|
||||||
|
0xb0, 0x49, 0xc6, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0x51, 0x31, 0x89, 0xb5, 0xd6, 0x00, 0x00,
|
||||||
|
0x00,
|
||||||
|
}
|
1888 vendor/github.com/gogo/protobuf/types/struct.pb.go generated vendored Normal file
File diff suppressed because it is too large
123 vendor/github.com/gogo/protobuf/types/timestamp.go generated vendored Normal file
@@ -0,0 +1,123 @@
|
||||||
|
// Go support for Protocol Buffers - Google's data interchange format
|
||||||
|
//
|
||||||
|
// Copyright 2016 The Go Authors. All rights reserved.
|
||||||
|
// https://github.com/golang/protobuf
|
||||||
|
//
|
||||||
|
// Redistribution and use in source and binary forms, with or without
|
||||||
|
// modification, are permitted provided that the following conditions are
|
||||||
|
// met:
|
||||||
|
//
|
||||||
|
// * Redistributions of source code must retain the above copyright
|
||||||
|
// notice, this list of conditions and the following disclaimer.
|
||||||
|
// * Redistributions in binary form must reproduce the above
|
||||||
|
// copyright notice, this list of conditions and the following disclaimer
|
||||||
|
// in the documentation and/or other materials provided with the
|
||||||
|
// distribution.
|
||||||
|
// * Neither the name of Google Inc. nor the names of its
|
||||||
|
// contributors may be used to endorse or promote products derived from
|
||||||
|
// this software without specific prior written permission.
|
||||||
|
//
|
||||||
|
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
|
||||||
|
package types
|
||||||
|
|
||||||
|
// This file implements operations on google.protobuf.Timestamp.
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
// Seconds field of the earliest valid Timestamp.
|
||||||
|
// This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
|
||||||
|
minValidSeconds = -62135596800
|
||||||
|
// Seconds field just after the latest valid Timestamp.
|
||||||
|
// This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
|
||||||
|
maxValidSeconds = 253402300800
|
||||||
|
)
|
||||||
|
|
||||||
|
// validateTimestamp determines whether a Timestamp is valid.
|
||||||
|
// A valid timestamp represents a time in the range
|
||||||
|
// [0001-01-01, 10000-01-01) and has a Nanos field
|
||||||
|
// in the range [0, 1e9).
|
||||||
|
//
|
||||||
|
// If the Timestamp is valid, validateTimestamp returns nil.
|
||||||
|
// Otherwise, it returns an error that describes
|
||||||
|
// the problem.
|
||||||
|
//
|
||||||
|
// Every valid Timestamp can be represented by a time.Time, but the converse is not true.
|
||||||
|
func validateTimestamp(ts *Timestamp) error {
|
||||||
|
if ts == nil {
|
||||||
|
return errors.New("timestamp: nil Timestamp")
|
||||||
|
}
|
||||||
|
if ts.Seconds < minValidSeconds {
|
||||||
|
return fmt.Errorf("timestamp: %#v before 0001-01-01", ts)
|
||||||
|
}
|
||||||
|
if ts.Seconds >= maxValidSeconds {
|
||||||
|
return fmt.Errorf("timestamp: %#v after 10000-01-01", ts)
|
||||||
|
}
|
||||||
|
if ts.Nanos < 0 || ts.Nanos >= 1e9 {
|
||||||
|
return fmt.Errorf("timestamp: %#v: nanos not in range [0, 1e9)", ts)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// TimestampFromProto converts a google.protobuf.Timestamp proto to a time.Time.
|
||||||
|
// It returns an error if the argument is invalid.
|
||||||
|
//
|
||||||
|
// Unlike most Go functions, if TimestampFromProto returns an error, the first return value
|
||||||
|
// is not the zero time.Time. Instead, it is the value obtained from the
|
||||||
|
// time.Unix function when passed the contents of the Timestamp, in the UTC
|
||||||
|
// locale. This may or may not be a meaningful time; many invalid Timestamps
|
||||||
|
// do map to valid time.Times.
|
||||||
|
//
|
||||||
|
// A nil Timestamp returns an error. The first return value in that case is
|
||||||
|
// undefined.
|
||||||
|
func TimestampFromProto(ts *Timestamp) (time.Time, error) {
|
||||||
|
// Don't return the zero value on error, because it corresponds to a valid
// timestamp. Instead return whatever time.Unix gives us.
|
||||||
|
var t time.Time
|
||||||
|
if ts == nil {
|
||||||
|
t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp
|
||||||
|
} else {
|
||||||
|
t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC()
|
||||||
|
}
|
||||||
|
return t, validateTimestamp(ts)
|
||||||
|
}
|
||||||
|
|
||||||
|
// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto.
|
||||||
|
// It returns an error if the resulting Timestamp is invalid.
|
||||||
|
func TimestampProto(t time.Time) (*Timestamp, error) {
|
||||||
|
seconds := t.Unix()
|
||||||
|
nanos := int32(t.Sub(time.Unix(seconds, 0)))
|
||||||
|
ts := &Timestamp{
|
||||||
|
Seconds: seconds,
|
||||||
|
Nanos: nanos,
|
||||||
|
}
|
||||||
|
if err := validateTimestamp(ts); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return ts, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// TimestampString returns the RFC 3339 string for valid Timestamps. For invalid
|
||||||
|
// Timestamps, it returns an error message in parentheses.
|
||||||
|
func TimestampString(ts *Timestamp) string {
|
||||||
|
t, err := TimestampFromProto(ts)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Sprintf("(%v)", err)
|
||||||
|
}
|
||||||
|
return t.Format(time.RFC3339Nano)
|
||||||
|
}
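A brief usage sketch for the conversion helpers above (illustrative only; it assumes this vendored package is imported under its package name, types):

package main

import (
	"fmt"
	"time"

	"github.com/gogo/protobuf/types"
)

func main() {
	// Convert a time.Time to a google.protobuf.Timestamp and back.
	now := time.Now().UTC()
	ts, err := types.TimestampProto(now)
	if err != nil {
		panic(err)
	}
	back, err := types.TimestampFromProto(ts)
	if err != nil {
		panic(err)
	}
	fmt.Println(types.TimestampString(ts), back.Equal(now)) // RFC 3339 string, true
}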
|
504 vendor/github.com/gogo/protobuf/types/timestamp.pb.go generated vendored Normal file
@@ -0,0 +1,504 @@
|
||||||
|
// Code generated by protoc-gen-gogo.
|
||||||
|
// source: timestamp.proto
|
||||||
|
// DO NOT EDIT!
|
||||||
|
|
||||||
|
/*
|
||||||
|
Package types is a generated protocol buffer package.
|
||||||
|
|
||||||
|
It is generated from these files:
|
||||||
|
timestamp.proto
|
||||||
|
|
||||||
|
It has these top-level messages:
|
||||||
|
Timestamp
|
||||||
|
*/
|
||||||
|
package types
|
||||||
|
|
||||||
|
import proto "github.com/gogo/protobuf/proto"
|
||||||
|
import fmt "fmt"
|
||||||
|
import math "math"
|
||||||
|
|
||||||
|
import strings "strings"
|
||||||
|
import reflect "reflect"
|
||||||
|
|
||||||
|
import io "io"
|
||||||
|
|
||||||
|
// Reference imports to suppress errors if they are not otherwise used.
|
||||||
|
var _ = proto.Marshal
|
||||||
|
var _ = fmt.Errorf
|
||||||
|
var _ = math.Inf
|
||||||
|
|
||||||
|
// This is a compile-time assertion to ensure that this generated file
|
||||||
|
// is compatible with the proto package it is being compiled against.
|
||||||
|
// A compilation error at this line likely means your copy of the
|
||||||
|
// proto package needs to be updated.
|
||||||
|
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
|
||||||
|
|
||||||
|
// A Timestamp represents a point in time independent of any time zone
|
||||||
|
// or calendar, represented as seconds and fractions of seconds at
|
||||||
|
// nanosecond resolution in UTC Epoch time. It is encoded using the
|
||||||
|
// Proleptic Gregorian Calendar which extends the Gregorian calendar
|
||||||
|
// backwards to year one. It is encoded assuming all minutes are 60
|
||||||
|
// seconds long, i.e. leap seconds are "smeared" so that no leap second
|
||||||
|
// table is needed for interpretation. Range is from
|
||||||
|
// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z.
|
||||||
|
// By restricting to that range, we ensure that we can convert to
|
||||||
|
// and from RFC 3339 date strings.
|
||||||
|
// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt).
|
||||||
|
//
|
||||||
|
// Example 1: Compute Timestamp from POSIX `time()`.
|
||||||
|
//
|
||||||
|
// Timestamp timestamp;
|
||||||
|
// timestamp.set_seconds(time(NULL));
|
||||||
|
// timestamp.set_nanos(0);
|
||||||
|
//
|
||||||
|
// Example 2: Compute Timestamp from POSIX `gettimeofday()`.
|
||||||
|
//
|
||||||
|
// struct timeval tv;
|
||||||
|
// gettimeofday(&tv, NULL);
|
||||||
|
//
|
||||||
|
// Timestamp timestamp;
|
||||||
|
// timestamp.set_seconds(tv.tv_sec);
|
||||||
|
// timestamp.set_nanos(tv.tv_usec * 1000);
|
||||||
|
//
|
||||||
|
// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`.
|
||||||
|
//
|
||||||
|
// FILETIME ft;
|
||||||
|
// GetSystemTimeAsFileTime(&ft);
|
||||||
|
// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime;
|
||||||
|
//
|
||||||
|
// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z
|
||||||
|
// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z.
|
||||||
|
// Timestamp timestamp;
|
||||||
|
// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL));
|
||||||
|
// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100));
|
||||||
|
//
|
||||||
|
// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`.
|
||||||
|
//
|
||||||
|
// long millis = System.currentTimeMillis();
|
||||||
|
//
|
||||||
|
// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000)
|
||||||
|
// .setNanos((int) ((millis % 1000) * 1000000)).build();
|
||||||
|
//
|
||||||
|
//
|
||||||
|
// Example 5: Compute Timestamp from current time in Python.
|
||||||
|
//
|
||||||
|
// timestamp = Timestamp()
|
||||||
|
// timestamp.GetCurrentTime()
|
||||||
|
//
|
||||||
|
//
|
||||||
|
type Timestamp struct {
|
||||||
|
// Represents seconds of UTC time since Unix epoch
|
||||||
|
// 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
|
||||||
|
// 9999-12-31T23:59:59Z inclusive.
|
||||||
|
Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"`
|
||||||
|
// Non-negative fractions of a second at nanosecond resolution. Negative
|
||||||
|
// second values with fractions must still have non-negative nanos values
|
||||||
|
// that count forward in time. Must be from 0 to 999,999,999
|
||||||
|
// inclusive.
|
||||||
|
Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Timestamp) Reset() { *m = Timestamp{} }
|
||||||
|
func (*Timestamp) ProtoMessage() {}
|
||||||
|
func (*Timestamp) Descriptor() ([]byte, []int) { return fileDescriptorTimestamp, []int{0} }
|
||||||
|
func (*Timestamp) XXX_WellKnownType() string { return "Timestamp" }
|
||||||
|
|
||||||
|
func (m *Timestamp) GetSeconds() int64 {
|
||||||
|
if m != nil {
|
||||||
|
return m.Seconds
|
||||||
|
}
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Timestamp) GetNanos() int32 {
|
||||||
|
if m != nil {
|
||||||
|
return m.Nanos
|
||||||
|
}
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
proto.RegisterType((*Timestamp)(nil), "google.protobuf.Timestamp")
|
||||||
|
}
|
||||||
|
func (this *Timestamp) Compare(that interface{}) int {
|
||||||
|
if that == nil {
|
||||||
|
if this == nil {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
return 1
|
||||||
|
}
|
||||||
|
|
||||||
|
that1, ok := that.(*Timestamp)
|
||||||
|
if !ok {
|
||||||
|
that2, ok := that.(Timestamp)
|
||||||
|
if ok {
|
||||||
|
that1 = &that2
|
||||||
|
} else {
|
||||||
|
return 1
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if that1 == nil {
|
||||||
|
if this == nil {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
return 1
|
||||||
|
} else if this == nil {
|
||||||
|
return -1
|
||||||
|
}
|
||||||
|
if this.Seconds != that1.Seconds {
|
||||||
|
if this.Seconds < that1.Seconds {
|
||||||
|
return -1
|
||||||
|
}
|
||||||
|
return 1
|
||||||
|
}
|
||||||
|
if this.Nanos != that1.Nanos {
|
||||||
|
if this.Nanos < that1.Nanos {
|
||||||
|
return -1
|
||||||
|
}
|
||||||
|
return 1
|
||||||
|
}
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
func (this *Timestamp) Equal(that interface{}) bool {
|
||||||
|
if that == nil {
|
||||||
|
if this == nil {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
that1, ok := that.(*Timestamp)
|
||||||
|
if !ok {
|
||||||
|
that2, ok := that.(Timestamp)
|
||||||
|
if ok {
|
||||||
|
that1 = &that2
|
||||||
|
} else {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if that1 == nil {
|
||||||
|
if this == nil {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
} else if this == nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if this.Seconds != that1.Seconds {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if this.Nanos != that1.Nanos {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
func (this *Timestamp) GoString() string {
|
||||||
|
if this == nil {
|
||||||
|
return "nil"
|
||||||
|
}
|
||||||
|
s := make([]string, 0, 6)
|
||||||
|
s = append(s, "&types.Timestamp{")
|
||||||
|
s = append(s, "Seconds: "+fmt.Sprintf("%#v", this.Seconds)+",\n")
|
||||||
|
s = append(s, "Nanos: "+fmt.Sprintf("%#v", this.Nanos)+",\n")
|
||||||
|
s = append(s, "}")
|
||||||
|
return strings.Join(s, "")
|
||||||
|
}
|
||||||
|
func valueToGoStringTimestamp(v interface{}, typ string) string {
|
||||||
|
rv := reflect.ValueOf(v)
|
||||||
|
if rv.IsNil() {
|
||||||
|
return "nil"
|
||||||
|
}
|
||||||
|
pv := reflect.Indirect(rv).Interface()
|
||||||
|
return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
|
||||||
|
}
|
||||||
|
func (m *Timestamp) Marshal() (dAtA []byte, err error) {
|
||||||
|
size := m.Size()
|
||||||
|
dAtA = make([]byte, size)
|
||||||
|
n, err := m.MarshalTo(dAtA)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return dAtA[:n], nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Timestamp) MarshalTo(dAtA []byte) (int, error) {
|
||||||
|
var i int
|
||||||
|
_ = i
|
||||||
|
var l int
|
||||||
|
_ = l
|
||||||
|
if m.Seconds != 0 {
|
||||||
|
dAtA[i] = 0x8
|
||||||
|
i++
|
||||||
|
i = encodeVarintTimestamp(dAtA, i, uint64(m.Seconds))
|
||||||
|
}
|
||||||
|
if m.Nanos != 0 {
|
||||||
|
dAtA[i] = 0x10
|
||||||
|
i++
|
||||||
|
i = encodeVarintTimestamp(dAtA, i, uint64(m.Nanos))
|
||||||
|
}
|
||||||
|
return i, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func encodeFixed64Timestamp(dAtA []byte, offset int, v uint64) int {
|
||||||
|
dAtA[offset] = uint8(v)
|
||||||
|
dAtA[offset+1] = uint8(v >> 8)
|
||||||
|
dAtA[offset+2] = uint8(v >> 16)
|
||||||
|
dAtA[offset+3] = uint8(v >> 24)
|
||||||
|
dAtA[offset+4] = uint8(v >> 32)
|
||||||
|
dAtA[offset+5] = uint8(v >> 40)
|
||||||
|
dAtA[offset+6] = uint8(v >> 48)
|
||||||
|
dAtA[offset+7] = uint8(v >> 56)
|
||||||
|
return offset + 8
|
||||||
|
}
|
||||||
|
func encodeFixed32Timestamp(dAtA []byte, offset int, v uint32) int {
|
||||||
|
dAtA[offset] = uint8(v)
|
||||||
|
dAtA[offset+1] = uint8(v >> 8)
|
||||||
|
dAtA[offset+2] = uint8(v >> 16)
|
||||||
|
dAtA[offset+3] = uint8(v >> 24)
|
||||||
|
return offset + 4
|
||||||
|
}
|
||||||
|
func encodeVarintTimestamp(dAtA []byte, offset int, v uint64) int {
|
||||||
|
for v >= 1<<7 {
|
||||||
|
dAtA[offset] = uint8(v&0x7f | 0x80)
|
||||||
|
v >>= 7
|
||||||
|
offset++
|
||||||
|
}
|
||||||
|
dAtA[offset] = uint8(v)
|
||||||
|
return offset + 1
|
||||||
|
}
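encodeVarintTimestamp writes a value seven bits at a time, setting the continuation bit on every byte except the last. The same idea as a tiny append-style sketch (illustrative only, not part of the vendored file):

package main

import "fmt"

// appendVarint encodes v as a base-128 varint, mirroring the generated helper.
func appendVarint(dst []byte, v uint64) []byte {
	for v >= 1<<7 {
		dst = append(dst, byte(v&0x7F|0x80)) // low 7 bits plus continuation bit
		v >>= 7
	}
	return append(dst, byte(v))
}

func main() {
	fmt.Printf("% x\n", appendVarint(nil, 300)) // ac 02
}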
|
||||||
|
func (m *Timestamp) Size() (n int) {
|
||||||
|
var l int
|
||||||
|
_ = l
|
||||||
|
if m.Seconds != 0 {
|
||||||
|
n += 1 + sovTimestamp(uint64(m.Seconds))
|
||||||
|
}
|
||||||
|
if m.Nanos != 0 {
|
||||||
|
n += 1 + sovTimestamp(uint64(m.Nanos))
|
||||||
|
}
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
|
||||||
|
func sovTimestamp(x uint64) (n int) {
|
||||||
|
for {
|
||||||
|
n++
|
||||||
|
x >>= 7
|
||||||
|
if x == 0 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
func sozTimestamp(x uint64) (n int) {
|
||||||
|
return sovTimestamp(uint64((x << 1) ^ uint64((int64(x) >> 63))))
|
||||||
|
}
|
||||||
|
func (m *Timestamp) Unmarshal(dAtA []byte) error {
|
||||||
|
l := len(dAtA)
|
||||||
|
iNdEx := 0
|
||||||
|
for iNdEx < l {
|
||||||
|
preIndex := iNdEx
|
||||||
|
var wire uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowTimestamp
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
wire |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
fieldNum := int32(wire >> 3)
|
||||||
|
wireType := int(wire & 0x7)
|
||||||
|
if wireType == 4 {
|
||||||
|
return fmt.Errorf("proto: Timestamp: wiretype end group for non-group")
|
||||||
|
}
|
||||||
|
if fieldNum <= 0 {
|
||||||
|
return fmt.Errorf("proto: Timestamp: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||||
|
}
|
||||||
|
switch fieldNum {
|
||||||
|
case 1:
|
||||||
|
if wireType != 0 {
|
||||||
|
return fmt.Errorf("proto: wrong wireType = %d for field Seconds", wireType)
|
||||||
|
}
|
||||||
|
m.Seconds = 0
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowTimestamp
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
m.Seconds |= (int64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
case 2:
|
||||||
|
if wireType != 0 {
|
||||||
|
return fmt.Errorf("proto: wrong wireType = %d for field Nanos", wireType)
|
||||||
|
}
|
||||||
|
m.Nanos = 0
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return ErrIntOverflowTimestamp
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
m.Nanos |= (int32(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
iNdEx = preIndex
|
||||||
|
skippy, err := skipTimestamp(dAtA[iNdEx:])
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if skippy < 0 {
|
||||||
|
return ErrInvalidLengthTimestamp
|
||||||
|
}
|
||||||
|
if (iNdEx + skippy) > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
iNdEx += skippy
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if iNdEx > l {
|
||||||
|
return io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
func skipTimestamp(dAtA []byte) (n int, err error) {
|
||||||
|
l := len(dAtA)
|
||||||
|
iNdEx := 0
|
||||||
|
for iNdEx < l {
|
||||||
|
var wire uint64
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return 0, ErrIntOverflowTimestamp
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
wire |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
wireType := int(wire & 0x7)
|
||||||
|
switch wireType {
|
||||||
|
case 0:
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return 0, ErrIntOverflowTimestamp
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
iNdEx++
|
||||||
|
if dAtA[iNdEx-1] < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return iNdEx, nil
|
||||||
|
case 1:
|
||||||
|
iNdEx += 8
|
||||||
|
return iNdEx, nil
|
||||||
|
case 2:
|
||||||
|
var length int
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return 0, ErrIntOverflowTimestamp
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
length |= (int(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
iNdEx += length
|
||||||
|
if length < 0 {
|
||||||
|
return 0, ErrInvalidLengthTimestamp
|
||||||
|
}
|
||||||
|
return iNdEx, nil
|
||||||
|
case 3:
|
||||||
|
for {
|
||||||
|
var innerWire uint64
|
||||||
|
var start int = iNdEx
|
||||||
|
for shift := uint(0); ; shift += 7 {
|
||||||
|
if shift >= 64 {
|
||||||
|
return 0, ErrIntOverflowTimestamp
|
||||||
|
}
|
||||||
|
if iNdEx >= l {
|
||||||
|
return 0, io.ErrUnexpectedEOF
|
||||||
|
}
|
||||||
|
b := dAtA[iNdEx]
|
||||||
|
iNdEx++
|
||||||
|
innerWire |= (uint64(b) & 0x7F) << shift
|
||||||
|
if b < 0x80 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
innerWireType := int(innerWire & 0x7)
|
||||||
|
if innerWireType == 4 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
next, err := skipTimestamp(dAtA[start:])
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
iNdEx = start + next
|
||||||
|
}
|
||||||
|
return iNdEx, nil
|
||||||
|
case 4:
|
||||||
|
return iNdEx, nil
|
||||||
|
case 5:
|
||||||
|
iNdEx += 4
|
||||||
|
return iNdEx, nil
|
||||||
|
default:
|
||||||
|
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
panic("unreachable")
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
ErrInvalidLengthTimestamp = fmt.Errorf("proto: negative length found during unmarshaling")
|
||||||
|
ErrIntOverflowTimestamp = fmt.Errorf("proto: integer overflow")
|
||||||
|
)
|
||||||
|
|
||||||
|
func init() { proto.RegisterFile("timestamp.proto", fileDescriptorTimestamp) }
|
||||||
|
|
||||||
|
var fileDescriptorTimestamp = []byte{
|
||||||
|
// 205 bytes of a gzipped FileDescriptorProto
|
||||||
|
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x2f, 0xc9, 0xcc, 0x4d,
|
||||||
|
0x2d, 0x2e, 0x49, 0xcc, 0x2d, 0xd0, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x4f, 0xcf, 0xcf,
|
||||||
|
0x4f, 0xcf, 0x49, 0x85, 0xf0, 0x92, 0x4a, 0xd3, 0x94, 0xac, 0xb9, 0x38, 0x43, 0x60, 0x6a, 0x84,
|
||||||
|
0x24, 0xb8, 0xd8, 0x8b, 0x53, 0x93, 0xf3, 0xf3, 0x52, 0x8a, 0x25, 0x18, 0x15, 0x18, 0x35, 0x98,
|
||||||
|
0x83, 0x60, 0x5c, 0x21, 0x11, 0x2e, 0xd6, 0xbc, 0xc4, 0xbc, 0xfc, 0x62, 0x09, 0x26, 0x05, 0x46,
|
||||||
|
0x0d, 0xd6, 0x20, 0x08, 0xc7, 0xa9, 0x81, 0xf1, 0xc2, 0x43, 0x39, 0x86, 0x1b, 0x0f, 0xe5, 0x18,
|
||||||
|
0x3e, 0x3c, 0x94, 0x63, 0x5c, 0xf1, 0x48, 0x8e, 0xf1, 0xc4, 0x23, 0x39, 0xc6, 0x0b, 0x8f, 0xe4,
|
||||||
|
0x18, 0x1f, 0x3c, 0x92, 0x63, 0x7c, 0xf1, 0x48, 0x8e, 0xe1, 0xc3, 0x23, 0x39, 0xc6, 0x15, 0x8f,
|
||||||
|
0xe5, 0x18, 0xb9, 0x84, 0x93, 0xf3, 0x73, 0xf5, 0xd0, 0x2c, 0x77, 0xe2, 0x83, 0x5b, 0x1d, 0x00,
|
||||||
|
0x12, 0x0a, 0x60, 0x8c, 0x62, 0x2d, 0xa9, 0x2c, 0x48, 0x2d, 0xfe, 0xc1, 0xc8, 0xb8, 0x88, 0x89,
|
||||||
|
0xd9, 0x3d, 0xc0, 0x69, 0x15, 0x93, 0x9c, 0x3b, 0x44, 0x4f, 0x00, 0x54, 0x8f, 0x5e, 0x78, 0x6a,
|
||||||
|
0x4e, 0x8e, 0x77, 0x5e, 0x7e, 0x79, 0x5e, 0x08, 0x48, 0x65, 0x12, 0x1b, 0xd8, 0x30, 0x63, 0x40,
|
||||||
|
0x00, 0x00, 0x00, 0xff, 0xff, 0x9b, 0xa2, 0x42, 0xda, 0xea, 0x00, 0x00, 0x00,
|
||||||
|
}
|
94 vendor/github.com/gogo/protobuf/types/timestamp_gogo.go generated vendored Normal file
@@ -0,0 +1,94 @@
|
||||||
|
// Protocol Buffers for Go with Gadgets
|
||||||
|
//
|
||||||
|
// Copyright (c) 2016, The GoGo Authors. All rights reserved.
|
||||||
|
// http://github.com/gogo/protobuf
|
||||||
|
//
|
||||||
|
// Redistribution and use in source and binary forms, with or without
|
||||||
|
// modification, are permitted provided that the following conditions are
|
||||||
|
// met:
|
||||||
|
//
|
||||||
|
// * Redistributions of source code must retain the above copyright
|
||||||
|
// notice, this list of conditions and the following disclaimer.
|
||||||
|
// * Redistributions in binary form must reproduce the above
|
||||||
|
// copyright notice, this list of conditions and the following disclaimer
|
||||||
|
// in the documentation and/or other materials provided with the
|
||||||
|
// distribution.
|
||||||
|
//
|
||||||
|
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
|
||||||
|
package types
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
func NewPopulatedTimestamp(r interface {
|
||||||
|
Int63() int64
|
||||||
|
}, easy bool) *Timestamp {
|
||||||
|
this := &Timestamp{}
|
||||||
|
ns := int64(r.Int63())
|
||||||
|
this.Seconds = ns / 1e9
|
||||||
|
this.Nanos = int32(ns % 1e9)
|
||||||
|
return this
|
||||||
|
}
|
||||||
|
|
||||||
|
func (ts *Timestamp) String() string {
|
||||||
|
return TimestampString(ts)
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewPopulatedStdTime(r interface {
|
||||||
|
Int63() int64
|
||||||
|
}, easy bool) *time.Time {
|
||||||
|
timestamp := NewPopulatedTimestamp(r, easy)
|
||||||
|
t, err := TimestampFromProto(timestamp)
|
||||||
|
if err != nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return &t
|
||||||
|
}
|
||||||
|
|
||||||
|
func SizeOfStdTime(t time.Time) int {
|
||||||
|
ts, err := TimestampProto(t)
|
||||||
|
if err != nil {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
return ts.Size()
|
||||||
|
}
|
||||||
|
|
||||||
|
func StdTimeMarshal(t time.Time) ([]byte, error) {
|
||||||
|
size := SizeOfStdTime(t)
|
||||||
|
buf := make([]byte, size)
|
||||||
|
_, err := StdTimeMarshalTo(t, buf)
|
||||||
|
return buf, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func StdTimeMarshalTo(t time.Time, data []byte) (int, error) {
|
||||||
|
ts, err := TimestampProto(t)
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
return ts.MarshalTo(data)
|
||||||
|
}
|
||||||
|
|
||||||
|
func StdTimeUnmarshal(t *time.Time, data []byte) error {
|
||||||
|
ts := &Timestamp{}
|
||||||
|
if err := ts.Unmarshal(data); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
tt, err := TimestampFromProto(ts)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
*t = tt
|
||||||
|
return nil
|
||||||
|
}
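The Std* helpers above convert between time.Time and the wire form of google.protobuf.Timestamp; a minimal round-trip sketch (illustrative only, assuming the package is imported as types):

package main

import (
	"fmt"
	"time"

	"github.com/gogo/protobuf/types"
)

func main() {
	in := time.Date(2017, 5, 10, 11, 57, 0, 0, time.UTC)

	// Marshal the time.Time as a serialized google.protobuf.Timestamp.
	buf, err := types.StdTimeMarshal(in)
	if err != nil {
		panic(err)
	}

	// Unmarshal it back into a time.Time.
	var out time.Time
	if err := types.StdTimeUnmarshal(&out, buf); err != nil {
		panic(err)
	}
	fmt.Println(out.Equal(in)) // true
}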
|
2259 vendor/github.com/gogo/protobuf/types/wrappers.pb.go generated vendored Normal file
File diff suppressed because it is too large
843 vendor/github.com/golang/protobuf/jsonpb/jsonpb.go generated vendored Normal file
@@ -0,0 +1,843 @@
|
||||||
|
// Go support for Protocol Buffers - Google's data interchange format
|
||||||
|
//
|
||||||
|
// Copyright 2015 The Go Authors. All rights reserved.
|
||||||
|
// https://github.com/golang/protobuf
|
||||||
|
//
|
||||||
|
// Redistribution and use in source and binary forms, with or without
|
||||||
|
// modification, are permitted provided that the following conditions are
|
||||||
|
// met:
|
||||||
|
//
|
||||||
|
// * Redistributions of source code must retain the above copyright
|
||||||
|
// notice, this list of conditions and the following disclaimer.
|
||||||
|
// * Redistributions in binary form must reproduce the above
|
||||||
|
// copyright notice, this list of conditions and the following disclaimer
|
||||||
|
// in the documentation and/or other materials provided with the
|
||||||
|
// distribution.
|
||||||
|
// * Neither the name of Google Inc. nor the names of its
|
||||||
|
// contributors may be used to endorse or promote products derived from
|
||||||
|
// this software without specific prior written permission.
|
||||||
|
//
|
||||||
|
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
|
||||||
|
/*
|
||||||
|
Package jsonpb provides marshaling and unmarshaling between protocol buffers and JSON.
|
||||||
|
It follows the specification at https://developers.google.com/protocol-buffers/docs/proto3#json.
|
||||||
|
|
||||||
|
This package produces a different output than the standard "encoding/json" package,
|
||||||
|
which does not operate correctly on protocol buffers.
|
||||||
|
*/
|
||||||
|
package jsonpb
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/json"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"reflect"
|
||||||
|
"sort"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/golang/protobuf/proto"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Marshaler is a configurable object for converting between
|
||||||
|
// protocol buffer objects and a JSON representation for them.
|
||||||
|
type Marshaler struct {
|
||||||
|
// Whether to render enum values as integers, as opposed to string values.
|
||||||
|
EnumsAsInts bool
|
||||||
|
|
||||||
|
// Whether to render fields with zero values.
|
||||||
|
EmitDefaults bool
|
||||||
|
|
||||||
|
// A string to indent each level by. The presence of this field will
|
||||||
|
// also cause a space to appear between the field separator and
|
||||||
|
// value, and for newlines to be appear between fields and array
|
||||||
|
// elements.
|
||||||
|
Indent string
|
||||||
|
|
||||||
|
// Whether to use the original (.proto) name for fields.
|
||||||
|
OrigName bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// Marshal marshals a protocol buffer into JSON.
|
||||||
|
func (m *Marshaler) Marshal(out io.Writer, pb proto.Message) error {
|
||||||
|
writer := &errWriter{writer: out}
|
||||||
|
return m.marshalObject(writer, pb, "", "")
|
||||||
|
}
|
||||||
|
|
||||||
|
// MarshalToString converts a protocol buffer object to JSON string.
|
||||||
|
func (m *Marshaler) MarshalToString(pb proto.Message) (string, error) {
|
||||||
|
var buf bytes.Buffer
|
||||||
|
if err := m.Marshal(&buf, pb); err != nil {
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
return buf.String(), nil
|
||||||
|
}
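A short usage sketch for the Marshaler options above (illustrative only; pb.Greeting stands in for any generated message type and is not part of this commit):

package main

import (
	"fmt"

	"github.com/golang/protobuf/jsonpb"

	pb "example.com/hello/pb" // hypothetical generated package with a Greeting message
)

func main() {
	// Indented output, original .proto field names, zero values included.
	m := &jsonpb.Marshaler{Indent: "  ", OrigName: true, EmitDefaults: true}

	s, err := m.MarshalToString(&pb.Greeting{Text: "hi"}) // Text is a hypothetical field
	if err != nil {
		panic(err)
	}
	fmt.Println(s)
}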
|
||||||
|
|
||||||
|
type int32Slice []int32
|
||||||
|
|
||||||
|
// For sorting extensions ids to ensure stable output.
|
||||||
|
func (s int32Slice) Len() int { return len(s) }
|
||||||
|
func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] }
|
||||||
|
func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
|
||||||
|
|
||||||
|
type wkt interface {
|
||||||
|
XXX_WellKnownType() string
|
||||||
|
}
|
||||||
|
|
||||||
|
// marshalObject writes a struct to the Writer.
|
||||||
|
func (m *Marshaler) marshalObject(out *errWriter, v proto.Message, indent, typeURL string) error {
|
||||||
|
s := reflect.ValueOf(v).Elem()
|
||||||
|
|
||||||
|
// Handle well-known types.
|
||||||
|
if wkt, ok := v.(wkt); ok {
|
||||||
|
switch wkt.XXX_WellKnownType() {
|
||||||
|
case "DoubleValue", "FloatValue", "Int64Value", "UInt64Value",
|
||||||
|
"Int32Value", "UInt32Value", "BoolValue", "StringValue", "BytesValue":
|
||||||
|
// "Wrappers use the same representation in JSON
|
||||||
|
// as the wrapped primitive type, ..."
|
||||||
|
sprop := proto.GetProperties(s.Type())
|
||||||
|
return m.marshalValue(out, sprop.Prop[0], s.Field(0), indent)
|
||||||
|
case "Any":
|
||||||
|
// Any is a bit more involved.
|
||||||
|
return m.marshalAny(out, v, indent)
|
||||||
|
case "Duration":
|
||||||
|
// "Generated output always contains 3, 6, or 9 fractional digits,
|
||||||
|
// depending on required precision."
|
||||||
|
s, ns := s.Field(0).Int(), s.Field(1).Int()
|
||||||
|
d := time.Duration(s)*time.Second + time.Duration(ns)*time.Nanosecond
|
||||||
|
x := fmt.Sprintf("%.9f", d.Seconds())
|
||||||
|
x = strings.TrimSuffix(x, "000")
|
||||||
|
x = strings.TrimSuffix(x, "000")
|
||||||
|
out.write(`"`)
|
||||||
|
out.write(x)
|
||||||
|
out.write(`s"`)
|
||||||
|
return out.err
|
||||||
|
case "Struct":
|
||||||
|
// Let marshalValue handle the `fields` map.
|
||||||
|
// TODO: pass the correct Properties if needed.
|
||||||
|
return m.marshalValue(out, &proto.Properties{}, s.Field(0), indent)
|
||||||
|
case "Timestamp":
|
||||||
|
// "RFC 3339, where generated output will always be Z-normalized
|
||||||
|
// and uses 3, 6 or 9 fractional digits."
|
||||||
|
s, ns := s.Field(0).Int(), s.Field(1).Int()
|
||||||
|
t := time.Unix(s, ns).UTC()
|
||||||
|
// time.RFC3339Nano isn't exactly right (we need to get 3/6/9 fractional digits).
|
||||||
|
x := t.Format("2006-01-02T15:04:05.000000000")
|
||||||
|
x = strings.TrimSuffix(x, "000")
|
||||||
|
x = strings.TrimSuffix(x, "000")
|
||||||
|
out.write(`"`)
|
||||||
|
out.write(x)
|
||||||
|
out.write(`Z"`)
|
||||||
|
return out.err
|
||||||
|
case "Value":
|
||||||
|
// Value has a single oneof.
|
||||||
|
kind := s.Field(0)
|
||||||
|
if kind.IsNil() {
|
||||||
|
// "absence of any variant indicates an error"
|
||||||
|
return errors.New("nil Value")
|
||||||
|
}
|
||||||
|
// oneof -> *T -> T -> T.F
|
||||||
|
x := kind.Elem().Elem().Field(0)
|
||||||
|
// TODO: pass the correct Properties if needed.
|
||||||
|
return m.marshalValue(out, &proto.Properties{}, x, indent)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
out.write("{")
|
||||||
|
if m.Indent != "" {
|
||||||
|
out.write("\n")
|
||||||
|
}
|
||||||
|
|
||||||
|
firstField := true
|
||||||
|
|
||||||
|
if typeURL != "" {
|
||||||
|
if err := m.marshalTypeURL(out, indent, typeURL); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
firstField = false
|
||||||
|
}
|
||||||
|
|
||||||
|
for i := 0; i < s.NumField(); i++ {
|
||||||
|
value := s.Field(i)
|
||||||
|
valueField := s.Type().Field(i)
|
||||||
|
if strings.HasPrefix(valueField.Name, "XXX_") {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// IsNil will panic on most value kinds.
|
||||||
|
switch value.Kind() {
|
||||||
|
case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
|
||||||
|
if value.IsNil() {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if !m.EmitDefaults {
|
||||||
|
switch value.Kind() {
|
||||||
|
case reflect.Bool:
|
||||||
|
if !value.Bool() {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
case reflect.Int32, reflect.Int64:
|
||||||
|
if value.Int() == 0 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
case reflect.Uint32, reflect.Uint64:
|
||||||
|
if value.Uint() == 0 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
case reflect.Float32, reflect.Float64:
|
||||||
|
if value.Float() == 0 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
case reflect.String:
|
||||||
|
if value.Len() == 0 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Oneof fields need special handling.
|
||||||
|
if valueField.Tag.Get("protobuf_oneof") != "" {
|
||||||
|
// value is an interface containing &T{real_value}.
|
||||||
|
sv := value.Elem().Elem() // interface -> *T -> T
|
||||||
|
value = sv.Field(0)
|
||||||
|
valueField = sv.Type().Field(0)
|
||||||
|
}
|
||||||
|
prop := jsonProperties(valueField, m.OrigName)
|
||||||
|
if !firstField {
|
||||||
|
m.writeSep(out)
|
||||||
|
}
|
||||||
|
if err := m.marshalField(out, prop, value, indent); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
firstField = false
|
||||||
|
}
|
||||||
|
|
||||||
|
// Handle proto2 extensions.
|
||||||
|
if ep, ok := v.(proto.Message); ok {
|
||||||
|
extensions := proto.RegisteredExtensions(v)
|
||||||
|
// Sort extensions for stable output.
|
||||||
|
ids := make([]int32, 0, len(extensions))
|
||||||
|
for id, desc := range extensions {
|
||||||
|
if !proto.HasExtension(ep, desc) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
ids = append(ids, id)
|
||||||
|
}
|
||||||
|
sort.Sort(int32Slice(ids))
|
||||||
|
for _, id := range ids {
|
||||||
|
desc := extensions[id]
|
||||||
|
if desc == nil {
|
||||||
|
// unknown extension
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
ext, extErr := proto.GetExtension(ep, desc)
|
||||||
|
if extErr != nil {
|
||||||
|
return extErr
|
||||||
|
}
|
||||||
|
value := reflect.ValueOf(ext)
|
||||||
|
var prop proto.Properties
|
||||||
|
prop.Parse(desc.Tag)
|
||||||
|
prop.JSONName = fmt.Sprintf("[%s]", desc.Name)
|
||||||
|
if !firstField {
|
||||||
|
m.writeSep(out)
|
||||||
|
}
|
||||||
|
if err := m.marshalField(out, &prop, value, indent); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
firstField = false
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
if m.Indent != "" {
|
||||||
|
out.write("\n")
|
||||||
|
out.write(indent)
|
||||||
|
}
|
||||||
|
out.write("}")
|
||||||
|
return out.err
|
||||||
|
}
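For Duration and Timestamp the code above always formats nine fractional digits and then trims trailing groups of three zeros, which produces the 3, 6, or 9 digits the proto3 JSON mapping calls for. The trick in isolation (illustrative only):

package main

import (
	"fmt"
	"strings"
	"time"
)

func main() {
	d := 1500 * time.Millisecond
	x := fmt.Sprintf("%.9f", d.Seconds()) // "1.500000000"
	x = strings.TrimSuffix(x, "000")      // "1.500000"
	x = strings.TrimSuffix(x, "000")      // "1.500"
	fmt.Println(x + "s")                  // 1.500s
}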
|
||||||
|
|
||||||
|
func (m *Marshaler) writeSep(out *errWriter) {
|
||||||
|
if m.Indent != "" {
|
||||||
|
out.write(",\n")
|
||||||
|
} else {
|
||||||
|
out.write(",")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Marshaler) marshalAny(out *errWriter, any proto.Message, indent string) error {
|
||||||
|
// "If the Any contains a value that has a special JSON mapping,
|
||||||
|
// it will be converted as follows: {"@type": xxx, "value": yyy}.
|
||||||
|
// Otherwise, the value will be converted into a JSON object,
|
||||||
|
// and the "@type" field will be inserted to indicate the actual data type."
|
||||||
|
v := reflect.ValueOf(any).Elem()
|
||||||
|
turl := v.Field(0).String()
|
||||||
|
val := v.Field(1).Bytes()
|
||||||
|
|
||||||
|
// Only the part of type_url after the last slash is relevant.
|
||||||
|
mname := turl
|
||||||
|
if slash := strings.LastIndex(mname, "/"); slash >= 0 {
|
||||||
|
mname = mname[slash+1:]
|
||||||
|
}
|
||||||
|
mt := proto.MessageType(mname)
|
||||||
|
if mt == nil {
|
||||||
|
return fmt.Errorf("unknown message type %q", mname)
|
||||||
|
}
|
||||||
|
msg := reflect.New(mt.Elem()).Interface().(proto.Message)
|
||||||
|
if err := proto.Unmarshal(val, msg); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, ok := msg.(wkt); ok {
|
||||||
|
out.write("{")
|
||||||
|
if m.Indent != "" {
|
||||||
|
out.write("\n")
|
||||||
|
}
|
||||||
|
if err := m.marshalTypeURL(out, indent, turl); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
m.writeSep(out)
|
||||||
|
if m.Indent != "" {
|
||||||
|
out.write(indent)
|
||||||
|
out.write(m.Indent)
|
||||||
|
out.write(`"value": `)
|
||||||
|
} else {
|
||||||
|
out.write(`"value":`)
|
||||||
|
}
|
||||||
|
if err := m.marshalObject(out, msg, indent+m.Indent, ""); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if m.Indent != "" {
|
||||||
|
out.write("\n")
|
||||||
|
out.write(indent)
|
||||||
|
}
|
||||||
|
out.write("}")
|
||||||
|
return out.err
|
||||||
|
}
|
||||||
|
|
||||||
|
return m.marshalObject(out, msg, indent, turl)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Marshaler) marshalTypeURL(out *errWriter, indent, typeURL string) error {
|
||||||
|
if m.Indent != "" {
|
||||||
|
out.write(indent)
|
||||||
|
out.write(m.Indent)
|
||||||
|
}
|
||||||
|
out.write(`"@type":`)
|
||||||
|
if m.Indent != "" {
|
||||||
|
out.write(" ")
|
||||||
|
}
|
||||||
|
b, err := json.Marshal(typeURL)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
out.write(string(b))
|
||||||
|
return out.err
|
||||||
|
}
|
||||||
|
|
||||||
|
// marshalField writes field description and value to the Writer.
|
||||||
|
func (m *Marshaler) marshalField(out *errWriter, prop *proto.Properties, v reflect.Value, indent string) error {
|
||||||
|
if m.Indent != "" {
|
||||||
|
out.write(indent)
|
||||||
|
out.write(m.Indent)
|
||||||
|
}
|
||||||
|
out.write(`"`)
|
||||||
|
out.write(prop.JSONName)
|
||||||
|
out.write(`":`)
|
||||||
|
if m.Indent != "" {
|
||||||
|
out.write(" ")
|
||||||
|
}
|
||||||
|
if err := m.marshalValue(out, prop, v, indent); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// marshalValue writes the value to the Writer.
|
||||||
|
func (m *Marshaler) marshalValue(out *errWriter, prop *proto.Properties, v reflect.Value, indent string) error {
|
||||||
|
|
||||||
|
var err error
|
||||||
|
v = reflect.Indirect(v)
|
||||||
|
|
||||||
|
// Handle repeated elements.
|
||||||
|
if v.Kind() == reflect.Slice && v.Type().Elem().Kind() != reflect.Uint8 {
|
||||||
|
out.write("[")
|
||||||
|
comma := ""
|
||||||
|
for i := 0; i < v.Len(); i++ {
|
||||||
|
sliceVal := v.Index(i)
|
||||||
|
out.write(comma)
|
||||||
|
if m.Indent != "" {
|
||||||
|
out.write("\n")
|
||||||
|
out.write(indent)
|
||||||
|
out.write(m.Indent)
|
||||||
|
out.write(m.Indent)
|
||||||
|
}
|
||||||
|
if err := m.marshalValue(out, prop, sliceVal, indent+m.Indent); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
comma = ","
|
||||||
|
}
|
||||||
|
if m.Indent != "" {
|
||||||
|
out.write("\n")
|
||||||
|
out.write(indent)
|
||||||
|
out.write(m.Indent)
|
||||||
|
}
|
||||||
|
out.write("]")
|
||||||
|
return out.err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Handle well-known types.
|
||||||
|
// Most are handled up in marshalObject (because 99% are messages).
|
||||||
|
type wkt interface {
|
||||||
|
XXX_WellKnownType() string
|
||||||
|
}
|
||||||
|
if wkt, ok := v.Interface().(wkt); ok {
|
||||||
|
switch wkt.XXX_WellKnownType() {
|
||||||
|
case "NullValue":
|
||||||
|
out.write("null")
|
||||||
|
return out.err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Handle enumerations.
|
||||||
|
if !m.EnumsAsInts && prop.Enum != "" {
|
||||||
|
// Unknown enum values are stringified by the proto library as their
|
||||||
|
// value. Such values should _not_ be quoted or they will be interpreted
|
||||||
|
// as an enum string instead of their value.
|
||||||
|
enumStr := v.Interface().(fmt.Stringer).String()
|
||||||
|
var valStr string
|
||||||
|
if v.Kind() == reflect.Ptr {
|
||||||
|
valStr = strconv.Itoa(int(v.Elem().Int()))
|
||||||
|
} else {
|
||||||
|
valStr = strconv.Itoa(int(v.Int()))
|
||||||
|
}
|
||||||
|
isKnownEnum := enumStr != valStr
|
||||||
|
if isKnownEnum {
|
||||||
|
out.write(`"`)
|
||||||
|
}
|
||||||
|
out.write(enumStr)
|
||||||
|
if isKnownEnum {
|
||||||
|
out.write(`"`)
|
||||||
|
}
|
||||||
|
return out.err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Handle nested messages.
|
||||||
|
if v.Kind() == reflect.Struct {
|
||||||
|
return m.marshalObject(out, v.Addr().Interface().(proto.Message), indent+m.Indent, "")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Handle maps.
|
||||||
|
// Since Go randomizes map iteration, we sort keys for stable output.
|
||||||
|
if v.Kind() == reflect.Map {
|
||||||
|
out.write(`{`)
|
||||||
|
keys := v.MapKeys()
|
||||||
|
sort.Sort(mapKeys(keys))
|
||||||
|
for i, k := range keys {
|
||||||
|
if i > 0 {
|
||||||
|
out.write(`,`)
|
||||||
|
}
|
||||||
|
if m.Indent != "" {
|
||||||
|
out.write("\n")
|
||||||
|
out.write(indent)
|
||||||
|
out.write(m.Indent)
|
||||||
|
out.write(m.Indent)
|
||||||
|
}
|
||||||
|
|
||||||
|
b, err := json.Marshal(k.Interface())
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
s := string(b)
|
||||||
|
|
||||||
|
// If the JSON is not a string value, encode it again to make it one.
|
||||||
|
if !strings.HasPrefix(s, `"`) {
|
||||||
|
b, err := json.Marshal(s)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
s = string(b)
|
||||||
|
}
|
||||||
|
|
||||||
|
out.write(s)
|
||||||
|
out.write(`:`)
|
||||||
|
if m.Indent != "" {
|
||||||
|
out.write(` `)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := m.marshalValue(out, prop, v.MapIndex(k), indent+m.Indent); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if m.Indent != "" {
|
||||||
|
out.write("\n")
|
||||||
|
out.write(indent)
|
||||||
|
out.write(m.Indent)
|
||||||
|
}
|
||||||
|
out.write(`}`)
|
||||||
|
return out.err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Default handling defers to the encoding/json library.
|
||||||
|
b, err := json.Marshal(v.Interface())
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
needToQuote := string(b[0]) != `"` && (v.Kind() == reflect.Int64 || v.Kind() == reflect.Uint64)
|
||||||
|
if needToQuote {
|
||||||
|
out.write(`"`)
|
||||||
|
}
|
||||||
|
out.write(string(b))
|
||||||
|
if needToQuote {
|
||||||
|
out.write(`"`)
|
||||||
|
}
|
||||||
|
return out.err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unmarshaler is a configurable object for converting from a JSON
|
||||||
|
// representation to a protocol buffer object.
|
||||||
|
type Unmarshaler struct {
|
||||||
|
// Whether to allow messages to contain unknown fields, as opposed to
|
||||||
|
// failing to unmarshal.
|
||||||
|
AllowUnknownFields bool
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalNext unmarshals the next protocol buffer from a JSON object stream.
|
||||||
|
// This function is lenient and will decode any option permutations of the
|
||||||
|
// related Marshaler.
|
||||||
|
func (u *Unmarshaler) UnmarshalNext(dec *json.Decoder, pb proto.Message) error {
|
||||||
|
inputValue := json.RawMessage{}
|
||||||
|
if err := dec.Decode(&inputValue); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return u.unmarshalValue(reflect.ValueOf(pb).Elem(), inputValue, nil)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unmarshal unmarshals a JSON object stream into a protocol
|
||||||
|
// buffer. This function is lenient and will decode any option
|
||||||
|
// permutations of the related Marshaler.
|
||||||
|
func (u *Unmarshaler) Unmarshal(r io.Reader, pb proto.Message) error {
|
||||||
|
dec := json.NewDecoder(r)
|
||||||
|
return u.UnmarshalNext(dec, pb)
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalNext unmarshals the next protocol buffer from a JSON object stream.
|
||||||
|
// This function is lenient and will decode any option permutations of the
|
||||||
|
// related Marshaler.
|
||||||
|
func UnmarshalNext(dec *json.Decoder, pb proto.Message) error {
|
||||||
|
return new(Unmarshaler).UnmarshalNext(dec, pb)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unmarshal unmarshals a JSON object stream into a protocol
|
||||||
|
// buffer. This function is lenient and will decode any option
|
||||||
|
// permutations of the related Marshaler.
|
||||||
|
func Unmarshal(r io.Reader, pb proto.Message) error {
|
||||||
|
return new(Unmarshaler).Unmarshal(r, pb)
|
||||||
|
}
|
||||||
|
|
||||||
|
// UnmarshalString will populate the fields of a protocol buffer based
|
||||||
|
// on a JSON string. This function is lenient and will decode any option
|
||||||
|
// permutations of the related Marshaler.
|
||||||
|
func UnmarshalString(str string, pb proto.Message) error {
|
||||||
|
return new(Unmarshaler).Unmarshal(strings.NewReader(str), pb)
|
||||||
|
}
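A short unmarshal sketch for the lenient helpers above (illustrative only; pb.Greeting and its text field are hypothetical):

package main

import (
	"strings"

	"github.com/golang/protobuf/jsonpb"

	pb "example.com/hello/pb" // hypothetical generated package
)

func main() {
	// Tolerate JSON fields that the message does not define.
	u := &jsonpb.Unmarshaler{AllowUnknownFields: true}

	var g pb.Greeting
	in := `{"text": "hi", "extra": 1}`
	if err := u.Unmarshal(strings.NewReader(in), &g); err != nil {
		panic(err)
	}
}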
|
||||||
|
|
||||||
|
// unmarshalValue converts/copies a value into the target.
|
||||||
|
// prop may be nil.
|
||||||
|
func (u *Unmarshaler) unmarshalValue(target reflect.Value, inputValue json.RawMessage, prop *proto.Properties) error {
|
||||||
|
targetType := target.Type()
|
||||||
|
|
||||||
|
// Allocate memory for pointer fields.
|
||||||
|
if targetType.Kind() == reflect.Ptr {
|
||||||
|
target.Set(reflect.New(targetType.Elem()))
|
||||||
|
return u.unmarshalValue(target.Elem(), inputValue, prop)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Handle well-known types.
|
||||||
|
type wkt interface {
|
||||||
|
XXX_WellKnownType() string
|
||||||
|
}
|
||||||
|
if wkt, ok := target.Addr().Interface().(wkt); ok {
|
||||||
|
switch wkt.XXX_WellKnownType() {
|
||||||
|
case "DoubleValue", "FloatValue", "Int64Value", "UInt64Value",
|
||||||
|
"Int32Value", "UInt32Value", "BoolValue", "StringValue", "BytesValue":
|
||||||
|
// "Wrappers use the same representation in JSON
|
||||||
|
// as the wrapped primitive type, except that null is allowed."
|
||||||
|
// encoding/json will turn JSON `null` into Go `nil`,
|
||||||
|
// so we don't have to do any extra work.
|
||||||
|
return u.unmarshalValue(target.Field(0), inputValue, prop)
|
||||||
|
case "Any":
|
||||||
|
return fmt.Errorf("unmarshaling Any not supported yet")
|
||||||
|
case "Duration":
|
||||||
|
ivStr := string(inputValue)
|
||||||
|
if ivStr == "null" {
|
||||||
|
target.Field(0).SetInt(0)
|
||||||
|
target.Field(1).SetInt(0)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
unq, err := strconv.Unquote(ivStr)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
d, err := time.ParseDuration(unq)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("bad Duration: %v", err)
|
||||||
|
}
|
||||||
|
ns := d.Nanoseconds()
|
||||||
|
s := ns / 1e9
|
||||||
|
ns %= 1e9
|
||||||
|
target.Field(0).SetInt(s)
|
||||||
|
target.Field(1).SetInt(ns)
|
||||||
|
return nil
|
||||||
|
case "Timestamp":
|
||||||
|
ivStr := string(inputValue)
|
||||||
|
if ivStr == "null" {
|
||||||
|
target.Field(0).SetInt(0)
|
||||||
|
target.Field(1).SetInt(0)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
unq, err := strconv.Unquote(ivStr)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
t, err := time.Parse(time.RFC3339Nano, unq)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("bad Timestamp: %v", err)
|
||||||
|
}
|
||||||
|
target.Field(0).SetInt(int64(t.Unix()))
|
||||||
|
target.Field(1).SetInt(int64(t.Nanosecond()))
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Handle enums, which have an underlying type of int32,
|
||||||
|
// and may appear as strings.
|
||||||
|
	// The case of an enum appearing as a number is handled
	// at the bottom of this function.
	if inputValue[0] == '"' && prop != nil && prop.Enum != "" {
		vmap := proto.EnumValueMap(prop.Enum)
		// Don't need to do unquoting; valid enum names
		// are from a limited character set.
		s := inputValue[1 : len(inputValue)-1]
		n, ok := vmap[string(s)]
		if !ok {
			return fmt.Errorf("unknown value %q for enum %s", s, prop.Enum)
		}
		if target.Kind() == reflect.Ptr { // proto2
			target.Set(reflect.New(targetType.Elem()))
			target = target.Elem()
		}
		target.SetInt(int64(n))
		return nil
	}

	// Handle nested messages.
	if targetType.Kind() == reflect.Struct {
		var jsonFields map[string]json.RawMessage
		if err := json.Unmarshal(inputValue, &jsonFields); err != nil {
			return err
		}

		consumeField := func(prop *proto.Properties) (json.RawMessage, bool) {
			// Be liberal in what names we accept; both orig_name and camelName are okay.
			fieldNames := acceptedJSONFieldNames(prop)

			vOrig, okOrig := jsonFields[fieldNames.orig]
			vCamel, okCamel := jsonFields[fieldNames.camel]
			if !okOrig && !okCamel {
				return nil, false
			}
			// If, for some reason, both are present in the data, favour the camelName.
			var raw json.RawMessage
			if okOrig {
				raw = vOrig
				delete(jsonFields, fieldNames.orig)
			}
			if okCamel {
				raw = vCamel
				delete(jsonFields, fieldNames.camel)
			}
			return raw, true
		}

		sprops := proto.GetProperties(targetType)
		for i := 0; i < target.NumField(); i++ {
			ft := target.Type().Field(i)
			if strings.HasPrefix(ft.Name, "XXX_") {
				continue
			}

			valueForField, ok := consumeField(sprops.Prop[i])
			if !ok {
				continue
			}

			if err := u.unmarshalValue(target.Field(i), valueForField, sprops.Prop[i]); err != nil {
				return err
			}
		}
		// Check for any oneof fields.
		if len(jsonFields) > 0 {
			for _, oop := range sprops.OneofTypes {
				raw, ok := consumeField(oop.Prop)
				if !ok {
					continue
				}
				nv := reflect.New(oop.Type.Elem())
				target.Field(oop.Field).Set(nv)
				if err := u.unmarshalValue(nv.Elem().Field(0), raw, oop.Prop); err != nil {
					return err
				}
			}
		}
		if !u.AllowUnknownFields && len(jsonFields) > 0 {
			// Pick any field to be the scapegoat.
			var f string
			for fname := range jsonFields {
				f = fname
				break
			}
			return fmt.Errorf("unknown field %q in %v", f, targetType)
		}
		return nil
	}

	// Handle arrays (which aren't encoded bytes)
	if targetType.Kind() == reflect.Slice && targetType.Elem().Kind() != reflect.Uint8 {
		var slc []json.RawMessage
		if err := json.Unmarshal(inputValue, &slc); err != nil {
			return err
		}
		len := len(slc)
		target.Set(reflect.MakeSlice(targetType, len, len))
		for i := 0; i < len; i++ {
			if err := u.unmarshalValue(target.Index(i), slc[i], prop); err != nil {
				return err
			}
		}
		return nil
	}

	// Handle maps (whose keys are always strings)
	if targetType.Kind() == reflect.Map {
		var mp map[string]json.RawMessage
		if err := json.Unmarshal(inputValue, &mp); err != nil {
			return err
		}
		target.Set(reflect.MakeMap(targetType))
		var keyprop, valprop *proto.Properties
		if prop != nil {
			// These could still be nil if the protobuf metadata is broken somehow.
			// TODO: This won't work because the fields are unexported.
			// We should probably just reparse them.
			//keyprop, valprop = prop.mkeyprop, prop.mvalprop
		}
		for ks, raw := range mp {
			// Unmarshal map key. The core json library already decoded the key into a
			// string, so we handle that specially. Other types were quoted post-serialization.
			var k reflect.Value
			if targetType.Key().Kind() == reflect.String {
				k = reflect.ValueOf(ks)
			} else {
				k = reflect.New(targetType.Key()).Elem()
				if err := u.unmarshalValue(k, json.RawMessage(ks), keyprop); err != nil {
					return err
				}
			}

			// Unmarshal map value.
			v := reflect.New(targetType.Elem()).Elem()
			if err := u.unmarshalValue(v, raw, valprop); err != nil {
				return err
			}
			target.SetMapIndex(k, v)
		}
		return nil
	}

	// 64-bit integers can be encoded as strings. In this case we drop
	// the quotes and proceed as normal.
	isNum := targetType.Kind() == reflect.Int64 || targetType.Kind() == reflect.Uint64
	if isNum && strings.HasPrefix(string(inputValue), `"`) {
		inputValue = inputValue[1 : len(inputValue)-1]
	}

	// Use the encoding/json for parsing other value types.
	return json.Unmarshal(inputValue, target.Addr().Interface())
}

// jsonProperties returns parsed proto.Properties for the field and corrects JSONName attribute.
func jsonProperties(f reflect.StructField, origName bool) *proto.Properties {
	var prop proto.Properties
	prop.Init(f.Type, f.Name, f.Tag.Get("protobuf"), &f)
	if origName || prop.JSONName == "" {
		prop.JSONName = prop.OrigName
	}
	return &prop
}

type fieldNames struct {
	orig, camel string
}

func acceptedJSONFieldNames(prop *proto.Properties) fieldNames {
	opts := fieldNames{orig: prop.OrigName, camel: prop.OrigName}
	if prop.JSONName != "" {
		opts.camel = prop.JSONName
	}
	return opts
}

// Writer wrapper inspired by https://blog.golang.org/errors-are-values
type errWriter struct {
	writer io.Writer
	err    error
}

func (w *errWriter) write(str string) {
	if w.err != nil {
		return
	}
	_, w.err = w.writer.Write([]byte(str))
}

// Map fields may have key types of non-float scalars, strings and enums.
// The easiest way to sort them in some deterministic order is to use fmt.
// If this turns out to be inefficient we can always consider other options,
// such as doing a Schwartzian transform.
//
// Numeric keys are sorted in numeric order per
// https://developers.google.com/protocol-buffers/docs/proto#maps.
type mapKeys []reflect.Value

func (s mapKeys) Len() int      { return len(s) }
func (s mapKeys) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s mapKeys) Less(i, j int) bool {
	if k := s[i].Kind(); k == s[j].Kind() {
		switch k {
		case reflect.Int32, reflect.Int64:
			return s[i].Int() < s[j].Int()
		case reflect.Uint32, reflect.Uint64:
			return s[i].Uint() < s[j].Uint()
		}
	}
	return fmt.Sprint(s[i].Interface()) < fmt.Sprint(s[j].Interface())
}
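The errWriter wrapper above is a small instance of the "errors are values" pattern: the first write error is latched and later writes become no-ops, so the caller checks one error at the end. A standalone sketch of the same idea using only the standard library (the names here are illustrative, not part of the vendored package):

	package main

	import (
		"fmt"
		"io"
		"os"
	)

	// sticky wraps an io.Writer and remembers the first error it sees.
	type sticky struct {
		w   io.Writer
		err error
	}

	func (s *sticky) write(str string) {
		if s.err != nil {
			return // a previous write already failed; do nothing
		}
		_, s.err = s.w.Write([]byte(str))
	}

	func main() {
		s := &sticky{w: os.Stdout}
		s.write("hello ")
		s.write("world\n")
		// A single error check replaces one check per write call.
		if s.err != nil {
			fmt.Fprintln(os.Stderr, "write failed:", s.err)
		}
	}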
11 vendor/github.com/golang/protobuf/proto/encode.go (generated, vendored)
@@ -1075,10 +1075,17 @@ func (o *Buffer) enc_map(p *Properties, base structPointer) error {
 
 func (o *Buffer) enc_exts(p *Properties, base structPointer) error {
 	exts := structPointer_Extensions(base, p.field)
-	if err := encodeExtensions(exts); err != nil {
+
+	v, mu := exts.extensionsRead()
+	if v == nil {
+		return nil
+	}
+
+	mu.Lock()
+	defer mu.Unlock()
+	if err := encodeExtensionsMap(v); err != nil {
 		return err
 	}
-	v, _ := exts.extensionsRead()
 
 	return o.enc_map_body(v)
 }
1 vendor/github.com/golang/protobuf/proto/extensions.go (generated, vendored)
@@ -154,6 +154,7 @@ type ExtensionDesc struct {
 	Field         int32  // field number
 	Name          string // fully-qualified name of extension, for text formatting
 	Tag           string // protobuf tag style
+	Filename      string // name of the file in which the extension is defined
 }
 
 func (ed *ExtensionDesc) repeated() bool {
1 vendor/github.com/golang/protobuf/proto/lib.go (generated, vendored)
@@ -73,7 +73,6 @@ for a protocol buffer variable v:
 When the .proto file specifies `syntax="proto3"`, there are some differences:
 
   - Non-repeated fields of non-message type are values instead of pointers.
-  - Getters are only generated for message and oneof fields.
   - Enum types do not get an Enum method.
 
 The simplest way to describe this is to see an example.
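The doc line dropped here reflects that proto3 getters are no longer limited to message and oneof fields; the generated code added later in this commit (for example struct.pb.go) carries getters such as GetNumberValue for plain scalar fields. A minimal usage sketch, assuming the vendored import path github.com/golang/protobuf/ptypes/struct is available to the build:

	package main

	import (
		"fmt"

		structpb "github.com/golang/protobuf/ptypes/struct"
	)

	func main() {
		// Value is a proto3 message with a "kind" oneof (see struct.pb.go below).
		v := &structpb.Value{Kind: &structpb.Value_NumberValue{NumberValue: 42}}

		fmt.Println(v.GetNumberValue()) // 42
		fmt.Println(v.GetStringValue()) // "" (zero value: a different oneof case is set)
	}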
484 vendor/github.com/golang/protobuf/proto/pointer_reflect.go (generated, vendored, new file)
@@ -0,0 +1,484 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2012 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// +build appengine js

// This file contains an implementation of proto field accesses using package reflect.
// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can
// be used on App Engine.

package proto

import (
	"math"
	"reflect"
)

// A structPointer is a pointer to a struct.
type structPointer struct {
	v reflect.Value
}

// toStructPointer returns a structPointer equivalent to the given reflect value.
// The reflect value must itself be a pointer to a struct.
func toStructPointer(v reflect.Value) structPointer {
	return structPointer{v}
}

// IsNil reports whether p is nil.
func structPointer_IsNil(p structPointer) bool {
	return p.v.IsNil()
}

// Interface returns the struct pointer as an interface value.
func structPointer_Interface(p structPointer, _ reflect.Type) interface{} {
	return p.v.Interface()
}

// A field identifies a field in a struct, accessible from a structPointer.
// In this implementation, a field is identified by the sequence of field indices
// passed to reflect's FieldByIndex.
type field []int

// toField returns a field equivalent to the given reflect field.
func toField(f *reflect.StructField) field {
	return f.Index
}

// invalidField is an invalid field identifier.
var invalidField = field(nil)

// IsValid reports whether the field identifier is valid.
func (f field) IsValid() bool { return f != nil }

// field returns the given field in the struct as a reflect value.
func structPointer_field(p structPointer, f field) reflect.Value {
	// Special case: an extension map entry with a value of type T
	// passes a *T to the struct-handling code with a zero field,
	// expecting that it will be treated as equivalent to *struct{ X T },
	// which has the same memory layout. We have to handle that case
	// specially, because reflect will panic if we call FieldByIndex on a
	// non-struct.
	if f == nil {
		return p.v.Elem()
	}

	return p.v.Elem().FieldByIndex(f)
}

// ifield returns the given field in the struct as an interface value.
func structPointer_ifield(p structPointer, f field) interface{} {
	return structPointer_field(p, f).Addr().Interface()
}

// Bytes returns the address of a []byte field in the struct.
func structPointer_Bytes(p structPointer, f field) *[]byte {
	return structPointer_ifield(p, f).(*[]byte)
}

// BytesSlice returns the address of a [][]byte field in the struct.
func structPointer_BytesSlice(p structPointer, f field) *[][]byte {
	return structPointer_ifield(p, f).(*[][]byte)
}

// Bool returns the address of a *bool field in the struct.
func structPointer_Bool(p structPointer, f field) **bool {
	return structPointer_ifield(p, f).(**bool)
}

// BoolVal returns the address of a bool field in the struct.
func structPointer_BoolVal(p structPointer, f field) *bool {
	return structPointer_ifield(p, f).(*bool)
}

// BoolSlice returns the address of a []bool field in the struct.
func structPointer_BoolSlice(p structPointer, f field) *[]bool {
	return structPointer_ifield(p, f).(*[]bool)
}

// String returns the address of a *string field in the struct.
func structPointer_String(p structPointer, f field) **string {
	return structPointer_ifield(p, f).(**string)
}

// StringVal returns the address of a string field in the struct.
func structPointer_StringVal(p structPointer, f field) *string {
	return structPointer_ifield(p, f).(*string)
}

// StringSlice returns the address of a []string field in the struct.
func structPointer_StringSlice(p structPointer, f field) *[]string {
	return structPointer_ifield(p, f).(*[]string)
}

// Extensions returns the address of an extension map field in the struct.
func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions {
	return structPointer_ifield(p, f).(*XXX_InternalExtensions)
}

// ExtMap returns the address of an extension map field in the struct.
func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
	return structPointer_ifield(p, f).(*map[int32]Extension)
}

// NewAt returns the reflect.Value for a pointer to a field in the struct.
func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value {
	return structPointer_field(p, f).Addr()
}

// SetStructPointer writes a *struct field in the struct.
func structPointer_SetStructPointer(p structPointer, f field, q structPointer) {
	structPointer_field(p, f).Set(q.v)
}

// GetStructPointer reads a *struct field in the struct.
func structPointer_GetStructPointer(p structPointer, f field) structPointer {
	return structPointer{structPointer_field(p, f)}
}

// StructPointerSlice the address of a []*struct field in the struct.
func structPointer_StructPointerSlice(p structPointer, f field) structPointerSlice {
	return structPointerSlice{structPointer_field(p, f)}
}

// A structPointerSlice represents the address of a slice of pointers to structs
// (themselves messages or groups). That is, v.Type() is *[]*struct{...}.
type structPointerSlice struct {
	v reflect.Value
}

func (p structPointerSlice) Len() int                  { return p.v.Len() }
func (p structPointerSlice) Index(i int) structPointer { return structPointer{p.v.Index(i)} }
func (p structPointerSlice) Append(q structPointer) {
	p.v.Set(reflect.Append(p.v, q.v))
}

var (
	int32Type   = reflect.TypeOf(int32(0))
	uint32Type  = reflect.TypeOf(uint32(0))
	float32Type = reflect.TypeOf(float32(0))
	int64Type   = reflect.TypeOf(int64(0))
	uint64Type  = reflect.TypeOf(uint64(0))
	float64Type = reflect.TypeOf(float64(0))
)

// A word32 represents a field of type *int32, *uint32, *float32, or *enum.
// That is, v.Type() is *int32, *uint32, *float32, or *enum and v is assignable.
type word32 struct {
	v reflect.Value
}

// IsNil reports whether p is nil.
func word32_IsNil(p word32) bool {
	return p.v.IsNil()
}

// Set sets p to point at a newly allocated word with bits set to x.
func word32_Set(p word32, o *Buffer, x uint32) {
	t := p.v.Type().Elem()
	switch t {
	case int32Type:
		if len(o.int32s) == 0 {
			o.int32s = make([]int32, uint32PoolSize)
		}
		o.int32s[0] = int32(x)
		p.v.Set(reflect.ValueOf(&o.int32s[0]))
		o.int32s = o.int32s[1:]
		return
	case uint32Type:
		if len(o.uint32s) == 0 {
			o.uint32s = make([]uint32, uint32PoolSize)
		}
		o.uint32s[0] = x
		p.v.Set(reflect.ValueOf(&o.uint32s[0]))
		o.uint32s = o.uint32s[1:]
		return
	case float32Type:
		if len(o.float32s) == 0 {
			o.float32s = make([]float32, uint32PoolSize)
		}
		o.float32s[0] = math.Float32frombits(x)
		p.v.Set(reflect.ValueOf(&o.float32s[0]))
		o.float32s = o.float32s[1:]
		return
	}

	// must be enum
	p.v.Set(reflect.New(t))
	p.v.Elem().SetInt(int64(int32(x)))
}

// Get gets the bits pointed at by p, as a uint32.
func word32_Get(p word32) uint32 {
	elem := p.v.Elem()
	switch elem.Kind() {
	case reflect.Int32:
		return uint32(elem.Int())
	case reflect.Uint32:
		return uint32(elem.Uint())
	case reflect.Float32:
		return math.Float32bits(float32(elem.Float()))
	}
	panic("unreachable")
}

// Word32 returns a reference to a *int32, *uint32, *float32, or *enum field in the struct.
func structPointer_Word32(p structPointer, f field) word32 {
	return word32{structPointer_field(p, f)}
}

// A word32Val represents a field of type int32, uint32, float32, or enum.
// That is, v.Type() is int32, uint32, float32, or enum and v is assignable.
type word32Val struct {
	v reflect.Value
}

// Set sets *p to x.
func word32Val_Set(p word32Val, x uint32) {
	switch p.v.Type() {
	case int32Type:
		p.v.SetInt(int64(x))
		return
	case uint32Type:
		p.v.SetUint(uint64(x))
		return
	case float32Type:
		p.v.SetFloat(float64(math.Float32frombits(x)))
		return
	}

	// must be enum
	p.v.SetInt(int64(int32(x)))
}

// Get gets the bits pointed at by p, as a uint32.
func word32Val_Get(p word32Val) uint32 {
	elem := p.v
	switch elem.Kind() {
	case reflect.Int32:
		return uint32(elem.Int())
	case reflect.Uint32:
		return uint32(elem.Uint())
	case reflect.Float32:
		return math.Float32bits(float32(elem.Float()))
	}
	panic("unreachable")
}

// Word32Val returns a reference to a int32, uint32, float32, or enum field in the struct.
func structPointer_Word32Val(p structPointer, f field) word32Val {
	return word32Val{structPointer_field(p, f)}
}

// A word32Slice is a slice of 32-bit values.
// That is, v.Type() is []int32, []uint32, []float32, or []enum.
type word32Slice struct {
	v reflect.Value
}

func (p word32Slice) Append(x uint32) {
	n, m := p.v.Len(), p.v.Cap()
	if n < m {
		p.v.SetLen(n + 1)
	} else {
		t := p.v.Type().Elem()
		p.v.Set(reflect.Append(p.v, reflect.Zero(t)))
	}
	elem := p.v.Index(n)
	switch elem.Kind() {
	case reflect.Int32:
		elem.SetInt(int64(int32(x)))
	case reflect.Uint32:
		elem.SetUint(uint64(x))
	case reflect.Float32:
		elem.SetFloat(float64(math.Float32frombits(x)))
	}
}

func (p word32Slice) Len() int {
	return p.v.Len()
}

func (p word32Slice) Index(i int) uint32 {
	elem := p.v.Index(i)
	switch elem.Kind() {
	case reflect.Int32:
		return uint32(elem.Int())
	case reflect.Uint32:
		return uint32(elem.Uint())
	case reflect.Float32:
		return math.Float32bits(float32(elem.Float()))
	}
	panic("unreachable")
}

// Word32Slice returns a reference to a []int32, []uint32, []float32, or []enum field in the struct.
func structPointer_Word32Slice(p structPointer, f field) word32Slice {
	return word32Slice{structPointer_field(p, f)}
}

// word64 is like word32 but for 64-bit values.
type word64 struct {
	v reflect.Value
}

func word64_Set(p word64, o *Buffer, x uint64) {
	t := p.v.Type().Elem()
	switch t {
	case int64Type:
		if len(o.int64s) == 0 {
			o.int64s = make([]int64, uint64PoolSize)
		}
		o.int64s[0] = int64(x)
		p.v.Set(reflect.ValueOf(&o.int64s[0]))
		o.int64s = o.int64s[1:]
		return
	case uint64Type:
		if len(o.uint64s) == 0 {
			o.uint64s = make([]uint64, uint64PoolSize)
		}
		o.uint64s[0] = x
		p.v.Set(reflect.ValueOf(&o.uint64s[0]))
		o.uint64s = o.uint64s[1:]
		return
	case float64Type:
		if len(o.float64s) == 0 {
			o.float64s = make([]float64, uint64PoolSize)
		}
		o.float64s[0] = math.Float64frombits(x)
		p.v.Set(reflect.ValueOf(&o.float64s[0]))
		o.float64s = o.float64s[1:]
		return
	}
	panic("unreachable")
}

func word64_IsNil(p word64) bool {
	return p.v.IsNil()
}

func word64_Get(p word64) uint64 {
	elem := p.v.Elem()
	switch elem.Kind() {
	case reflect.Int64:
		return uint64(elem.Int())
	case reflect.Uint64:
		return elem.Uint()
	case reflect.Float64:
		return math.Float64bits(elem.Float())
	}
	panic("unreachable")
}

func structPointer_Word64(p structPointer, f field) word64 {
	return word64{structPointer_field(p, f)}
}

// word64Val is like word32Val but for 64-bit values.
type word64Val struct {
	v reflect.Value
}

func word64Val_Set(p word64Val, o *Buffer, x uint64) {
	switch p.v.Type() {
	case int64Type:
		p.v.SetInt(int64(x))
		return
	case uint64Type:
		p.v.SetUint(x)
		return
	case float64Type:
		p.v.SetFloat(math.Float64frombits(x))
		return
	}
	panic("unreachable")
}

func word64Val_Get(p word64Val) uint64 {
	elem := p.v
	switch elem.Kind() {
	case reflect.Int64:
		return uint64(elem.Int())
	case reflect.Uint64:
		return elem.Uint()
	case reflect.Float64:
		return math.Float64bits(elem.Float())
	}
	panic("unreachable")
}

func structPointer_Word64Val(p structPointer, f field) word64Val {
	return word64Val{structPointer_field(p, f)}
}

type word64Slice struct {
	v reflect.Value
}

func (p word64Slice) Append(x uint64) {
	n, m := p.v.Len(), p.v.Cap()
	if n < m {
		p.v.SetLen(n + 1)
	} else {
		t := p.v.Type().Elem()
		p.v.Set(reflect.Append(p.v, reflect.Zero(t)))
	}
	elem := p.v.Index(n)
	switch elem.Kind() {
	case reflect.Int64:
		elem.SetInt(int64(int64(x)))
	case reflect.Uint64:
		elem.SetUint(uint64(x))
	case reflect.Float64:
		elem.SetFloat(float64(math.Float64frombits(x)))
	}
}

func (p word64Slice) Len() int {
	return p.v.Len()
}

func (p word64Slice) Index(i int) uint64 {
	elem := p.v.Index(i)
	switch elem.Kind() {
	case reflect.Int64:
		return uint64(elem.Int())
	case reflect.Uint64:
		return uint64(elem.Uint())
	case reflect.Float64:
		return math.Float64bits(float64(elem.Float()))
	}
	panic("unreachable")
}

func structPointer_Word64Slice(p structPointer, f field) word64Slice {
	return word64Slice{structPointer_field(p, f)}
}
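pointer_reflect.go reaches struct fields through reflect.Value.FieldByIndex rather than unsafe pointer arithmetic, which is what makes it usable on App Engine and under GopherJS. A tiny standalone sketch of that access pattern using only the standard library (the example type and field names are illustrative, not taken from the package):

	package main

	import (
		"fmt"
		"reflect"
	)

	// example stands in for a generated message struct; proto2 scalar fields
	// are pointers, which is why the helpers above hand back **T / *T addresses.
	type example struct {
		Name  *string
		Count int32
	}

	func main() {
		e := &example{}
		v := reflect.ValueOf(e).Elem()

		// A "field" in pointer_reflect.go is just the index path that
		// reflect.StructField.Index provides to FieldByIndex.
		nameIdx := []int{0}
		countIdx := []int{1}

		s := "prometheus"
		v.FieldByIndex(nameIdx).Set(reflect.ValueOf(&s))
		v.FieldByIndex(countIdx).SetInt(3)

		fmt.Println(*e.Name, e.Count) // prometheus 3
	}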
6 vendor/github.com/golang/protobuf/proto/text_parser.go (generated, vendored)
@@ -592,7 +592,11 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
 			props = oop.Prop
 			nv := reflect.New(oop.Type.Elem())
 			dst = nv.Elem().Field(0)
-			sv.Field(oop.Field).Set(nv)
+			field := sv.Field(oop.Field)
+			if !field.IsNil() {
+				return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, sv.Type().Field(oop.Field).Name)
+			}
+			field.Set(nv)
 		}
 		if !dst.IsValid() {
 			return p.errorf("unknown field name %q in %v", name, st)
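This hunk makes the text-format parser reject a second value for a oneof instead of silently overwriting the first one. A small sketch of the new behaviour, assuming the vendored proto and structpb packages are importable at these paths (struct.pb.go below defines the "kind" oneof used here):

	package main

	import (
		"fmt"

		"github.com/golang/protobuf/proto"
		structpb "github.com/golang/protobuf/ptypes/struct"
	)

	func main() {
		var v structpb.Value

		// Both fields below belong to Value's "kind" oneof, so the parser now
		// returns an error instead of letting number_value overwrite string_value.
		err := proto.UnmarshalText(`string_value: "a" number_value: 1`, &v)
		fmt.Println(err)
	}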
36 vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/Makefile (generated, vendored, new file)
@@ -0,0 +1,36 @@
# Go support for Protocol Buffers - Google's data interchange format
#
# Copyright 2010 The Go Authors. All rights reserved.
# https://github.com/golang/protobuf
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

# Not stored here, but descriptor.proto is in https://github.com/google/protobuf/
# at src/google/protobuf/descriptor.proto
regenerate:
	@echo WARNING! THIS RULE IS PROBABLY NOT RIGHT FOR YOUR INSTALLATION
	protoc --go_out=../../../../.. -I$(HOME)/src/protobuf/include $(HOME)/src/protobuf/include/google/protobuf/descriptor.proto
2152 vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go (generated, vendored, new file)
File diff suppressed because it is too large
155 vendor/github.com/golang/protobuf/ptypes/any/any.pb.go (generated, vendored, new file)
@@ -0,0 +1,155 @@
|
||||||
|
// Code generated by protoc-gen-go.
|
||||||
|
// source: github.com/golang/protobuf/ptypes/any/any.proto
|
||||||
|
// DO NOT EDIT!
|
||||||
|
|
||||||
|
/*
|
||||||
|
Package any is a generated protocol buffer package.
|
||||||
|
|
||||||
|
It is generated from these files:
|
||||||
|
github.com/golang/protobuf/ptypes/any/any.proto
|
||||||
|
|
||||||
|
It has these top-level messages:
|
||||||
|
Any
|
||||||
|
*/
|
||||||
|
package any
|
||||||
|
|
||||||
|
import proto "github.com/golang/protobuf/proto"
|
||||||
|
import fmt "fmt"
|
||||||
|
import math "math"
|
||||||
|
|
||||||
|
// Reference imports to suppress errors if they are not otherwise used.
|
||||||
|
var _ = proto.Marshal
|
||||||
|
var _ = fmt.Errorf
|
||||||
|
var _ = math.Inf
|
||||||
|
|
||||||
|
// This is a compile-time assertion to ensure that this generated file
|
||||||
|
// is compatible with the proto package it is being compiled against.
|
||||||
|
// A compilation error at this line likely means your copy of the
|
||||||
|
// proto package needs to be updated.
|
||||||
|
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
|
||||||
|
|
||||||
|
// `Any` contains an arbitrary serialized protocol buffer message along with a
|
||||||
|
// URL that describes the type of the serialized message.
|
||||||
|
//
|
||||||
|
// Protobuf library provides support to pack/unpack Any values in the form
|
||||||
|
// of utility functions or additional generated methods of the Any type.
|
||||||
|
//
|
||||||
|
// Example 1: Pack and unpack a message in C++.
|
||||||
|
//
|
||||||
|
// Foo foo = ...;
|
||||||
|
// Any any;
|
||||||
|
// any.PackFrom(foo);
|
||||||
|
// ...
|
||||||
|
// if (any.UnpackTo(&foo)) {
|
||||||
|
// ...
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// Example 2: Pack and unpack a message in Java.
|
||||||
|
//
|
||||||
|
// Foo foo = ...;
|
||||||
|
// Any any = Any.pack(foo);
|
||||||
|
// ...
|
||||||
|
// if (any.is(Foo.class)) {
|
||||||
|
// foo = any.unpack(Foo.class);
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// Example 3: Pack and unpack a message in Python.
|
||||||
|
//
|
||||||
|
// foo = Foo(...)
|
||||||
|
// any = Any()
|
||||||
|
// any.Pack(foo)
|
||||||
|
// ...
|
||||||
|
// if any.Is(Foo.DESCRIPTOR):
|
||||||
|
// any.Unpack(foo)
|
||||||
|
// ...
|
||||||
|
//
|
||||||
|
// The pack methods provided by protobuf library will by default use
|
||||||
|
// 'type.googleapis.com/full.type.name' as the type URL and the unpack
|
||||||
|
// methods only use the fully qualified type name after the last '/'
|
||||||
|
// in the type URL, for example "foo.bar.com/x/y.z" will yield type
|
||||||
|
// name "y.z".
|
||||||
|
//
|
||||||
|
//
|
||||||
|
// JSON
|
||||||
|
// ====
|
||||||
|
// The JSON representation of an `Any` value uses the regular
|
||||||
|
// representation of the deserialized, embedded message, with an
|
||||||
|
// additional field `@type` which contains the type URL. Example:
|
||||||
|
//
|
||||||
|
// package google.profile;
|
||||||
|
// message Person {
|
||||||
|
// string first_name = 1;
|
||||||
|
// string last_name = 2;
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// {
|
||||||
|
// "@type": "type.googleapis.com/google.profile.Person",
|
||||||
|
// "firstName": <string>,
|
||||||
|
// "lastName": <string>
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// If the embedded message type is well-known and has a custom JSON
|
||||||
|
// representation, that representation will be embedded adding a field
|
||||||
|
// `value` which holds the custom JSON in addition to the `@type`
|
||||||
|
// field. Example (for message [google.protobuf.Duration][]):
|
||||||
|
//
|
||||||
|
// {
|
||||||
|
// "@type": "type.googleapis.com/google.protobuf.Duration",
|
||||||
|
// "value": "1.212s"
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
type Any struct {
|
||||||
|
// A URL/resource name whose content describes the type of the
|
||||||
|
// serialized protocol buffer message.
|
||||||
|
//
|
||||||
|
// For URLs which use the scheme `http`, `https`, or no scheme, the
|
||||||
|
// following restrictions and interpretations apply:
|
||||||
|
//
|
||||||
|
// * If no scheme is provided, `https` is assumed.
|
||||||
|
// * The last segment of the URL's path must represent the fully
|
||||||
|
// qualified name of the type (as in `path/google.protobuf.Duration`).
|
||||||
|
// The name should be in a canonical form (e.g., leading "." is
|
||||||
|
// not accepted).
|
||||||
|
// * An HTTP GET on the URL must yield a [google.protobuf.Type][]
|
||||||
|
// value in binary format, or produce an error.
|
||||||
|
// * Applications are allowed to cache lookup results based on the
|
||||||
|
// URL, or have them precompiled into a binary to avoid any
|
||||||
|
// lookup. Therefore, binary compatibility needs to be preserved
|
||||||
|
// on changes to types. (Use versioned type names to manage
|
||||||
|
// breaking changes.)
|
||||||
|
//
|
||||||
|
// Schemes other than `http`, `https` (or the empty scheme) might be
|
||||||
|
// used with implementation specific semantics.
|
||||||
|
//
|
||||||
|
TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl" json:"type_url,omitempty"`
|
||||||
|
// Must be a valid serialized protocol buffer of the above specified type.
|
||||||
|
Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Any) Reset() { *m = Any{} }
|
||||||
|
func (m *Any) String() string { return proto.CompactTextString(m) }
|
||||||
|
func (*Any) ProtoMessage() {}
|
||||||
|
func (*Any) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
|
||||||
|
func (*Any) XXX_WellKnownType() string { return "Any" }
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
proto.RegisterType((*Any)(nil), "google.protobuf.Any")
|
||||||
|
}
|
||||||
|
|
||||||
|
func init() { proto.RegisterFile("github.com/golang/protobuf/ptypes/any/any.proto", fileDescriptor0) }
|
||||||
|
|
||||||
|
var fileDescriptor0 = []byte{
|
||||||
|
// 187 bytes of a gzipped FileDescriptorProto
|
||||||
|
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xd2, 0x4f, 0xcf, 0x2c, 0xc9,
|
||||||
|
0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0xd7, 0x2f, 0x28,
|
||||||
|
0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x28, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x4f, 0xcc,
|
||||||
|
0xab, 0x04, 0x61, 0x3d, 0xb0, 0xb8, 0x10, 0x7f, 0x7a, 0x7e, 0x7e, 0x7a, 0x4e, 0xaa, 0x1e, 0x4c,
|
||||||
|
0x95, 0x92, 0x19, 0x17, 0xb3, 0x63, 0x5e, 0xa5, 0x90, 0x24, 0x17, 0x07, 0x48, 0x79, 0x7c, 0x69,
|
||||||
|
0x51, 0x8e, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x67, 0x10, 0x3b, 0x88, 0x1f, 0x5a, 0x94, 0x23, 0x24,
|
||||||
|
0xc2, 0xc5, 0x5a, 0x96, 0x98, 0x53, 0x9a, 0x2a, 0xc1, 0xa4, 0xc0, 0xa8, 0xc1, 0x13, 0x04, 0xe1,
|
||||||
|
0x38, 0x15, 0x71, 0x09, 0x27, 0xe7, 0xe7, 0xea, 0xa1, 0x19, 0xe7, 0xc4, 0xe1, 0x98, 0x57, 0x19,
|
||||||
|
0x00, 0xe2, 0x04, 0x30, 0x46, 0xa9, 0x12, 0xe5, 0xb8, 0x05, 0x8c, 0x8c, 0x8b, 0x98, 0x98, 0xdd,
|
||||||
|
0x03, 0x9c, 0x56, 0x31, 0xc9, 0xb9, 0x43, 0x4c, 0x0b, 0x80, 0xaa, 0xd2, 0x0b, 0x4f, 0xcd, 0xc9,
|
||||||
|
0xf1, 0xce, 0xcb, 0x2f, 0xcf, 0x0b, 0x01, 0xa9, 0x4e, 0x62, 0x03, 0x6b, 0x37, 0x06, 0x04, 0x00,
|
||||||
|
0x00, 0xff, 0xff, 0xc6, 0x4d, 0x03, 0x23, 0xf6, 0x00, 0x00, 0x00,
|
||||||
|
}
|
140 vendor/github.com/golang/protobuf/ptypes/any/any.proto (generated, vendored, new file)
@@ -0,0 +1,140 @@
// Protocol Buffers - Google's data interchange format
// Copyright 2008 Google Inc. All rights reserved.
// https://developers.google.com/protocol-buffers/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

syntax = "proto3";

package google.protobuf;

option csharp_namespace = "Google.Protobuf.WellKnownTypes";
option go_package = "github.com/golang/protobuf/ptypes/any";
option java_package = "com.google.protobuf";
option java_outer_classname = "AnyProto";
option java_multiple_files = true;
option java_generate_equals_and_hash = true;
option objc_class_prefix = "GPB";

// `Any` contains an arbitrary serialized protocol buffer message along with a
// URL that describes the type of the serialized message.
//
// Protobuf library provides support to pack/unpack Any values in the form
// of utility functions or additional generated methods of the Any type.
//
// Example 1: Pack and unpack a message in C++.
//
//     Foo foo = ...;
//     Any any;
//     any.PackFrom(foo);
//     ...
//     if (any.UnpackTo(&foo)) {
//       ...
//     }
//
// Example 2: Pack and unpack a message in Java.
//
//     Foo foo = ...;
//     Any any = Any.pack(foo);
//     ...
//     if (any.is(Foo.class)) {
//       foo = any.unpack(Foo.class);
//     }
//
// Example 3: Pack and unpack a message in Python.
//
//     foo = Foo(...)
//     any = Any()
//     any.Pack(foo)
//     ...
//     if any.Is(Foo.DESCRIPTOR):
//       any.Unpack(foo)
//       ...
//
// The pack methods provided by protobuf library will by default use
// 'type.googleapis.com/full.type.name' as the type URL and the unpack
// methods only use the fully qualified type name after the last '/'
// in the type URL, for example "foo.bar.com/x/y.z" will yield type
// name "y.z".
//
//
// JSON
// ====
// The JSON representation of an `Any` value uses the regular
// representation of the deserialized, embedded message, with an
// additional field `@type` which contains the type URL. Example:
//
//     package google.profile;
//     message Person {
//       string first_name = 1;
//       string last_name = 2;
//     }
//
//     {
//       "@type": "type.googleapis.com/google.profile.Person",
//       "firstName": <string>,
//       "lastName": <string>
//     }
//
// If the embedded message type is well-known and has a custom JSON
// representation, that representation will be embedded adding a field
// `value` which holds the custom JSON in addition to the `@type`
// field. Example (for message [google.protobuf.Duration][]):
//
//     {
//       "@type": "type.googleapis.com/google.protobuf.Duration",
//       "value": "1.212s"
//     }
//
message Any {
  // A URL/resource name whose content describes the type of the
  // serialized protocol buffer message.
  //
  // For URLs which use the scheme `http`, `https`, or no scheme, the
  // following restrictions and interpretations apply:
  //
  // * If no scheme is provided, `https` is assumed.
  // * The last segment of the URL's path must represent the fully
  //   qualified name of the type (as in `path/google.protobuf.Duration`).
  //   The name should be in a canonical form (e.g., leading "." is
  //   not accepted).
  // * An HTTP GET on the URL must yield a [google.protobuf.Type][]
  //   value in binary format, or produce an error.
  // * Applications are allowed to cache lookup results based on the
  //   URL, or have them precompiled into a binary to avoid any
  //   lookup. Therefore, binary compatibility needs to be preserved
  //   on changes to types. (Use versioned type names to manage
  //   breaking changes.)
  //
  // Schemes other than `http`, `https` (or the empty scheme) might be
  // used with implementation specific semantics.
  //
  string type_url = 1;

  // Must be a valid serialized protocol buffer of the above specified type.
  bytes value = 2;
}
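any.proto only defines the wire shape; in Go, packing and unpacking normally go through the helper package github.com/golang/protobuf/ptypes, which is not part of this hunk, so its availability here is an assumption. A minimal sketch using the vendored structpb message as the payload:

	package main

	import (
		"fmt"

		"github.com/golang/protobuf/ptypes"
		structpb "github.com/golang/protobuf/ptypes/struct"
	)

	func main() {
		orig := &structpb.Value{Kind: &structpb.Value_StringValue{StringValue: "hello"}}

		// Pack: the type URL defaults to type.googleapis.com/<full.type.name>.
		a, err := ptypes.MarshalAny(orig)
		if err != nil {
			panic(err)
		}
		fmt.Println(a.TypeUrl)

		// Unpack back into a concrete message of the matching type.
		var out structpb.Value
		if err := ptypes.UnmarshalAny(a, &out); err != nil {
			panic(err)
		}
		fmt.Println(out.GetStringValue()) // hello
	}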
382 vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go (generated, vendored, new file)
@@ -0,0 +1,382 @@
|
||||||
|
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||||
|
// source: github.com/golang/protobuf/ptypes/struct/struct.proto
|
||||||
|
|
||||||
|
/*
|
||||||
|
Package structpb is a generated protocol buffer package.
|
||||||
|
|
||||||
|
It is generated from these files:
|
||||||
|
github.com/golang/protobuf/ptypes/struct/struct.proto
|
||||||
|
|
||||||
|
It has these top-level messages:
|
||||||
|
Struct
|
||||||
|
Value
|
||||||
|
ListValue
|
||||||
|
*/
|
||||||
|
package structpb
|
||||||
|
|
||||||
|
import proto "github.com/golang/protobuf/proto"
|
||||||
|
import fmt "fmt"
|
||||||
|
import math "math"
|
||||||
|
|
||||||
|
// Reference imports to suppress errors if they are not otherwise used.
|
||||||
|
var _ = proto.Marshal
|
||||||
|
var _ = fmt.Errorf
|
||||||
|
var _ = math.Inf
|
||||||
|
|
||||||
|
// This is a compile-time assertion to ensure that this generated file
|
||||||
|
// is compatible with the proto package it is being compiled against.
|
||||||
|
// A compilation error at this line likely means your copy of the
|
||||||
|
// proto package needs to be updated.
|
||||||
|
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
|
||||||
|
|
||||||
|
// `NullValue` is a singleton enumeration to represent the null value for the
|
||||||
|
// `Value` type union.
|
||||||
|
//
|
||||||
|
// The JSON representation for `NullValue` is JSON `null`.
|
||||||
|
type NullValue int32
|
||||||
|
|
||||||
|
const (
|
||||||
|
// Null value.
|
||||||
|
NullValue_NULL_VALUE NullValue = 0
|
||||||
|
)
|
||||||
|
|
||||||
|
var NullValue_name = map[int32]string{
|
||||||
|
0: "NULL_VALUE",
|
||||||
|
}
|
||||||
|
var NullValue_value = map[string]int32{
|
||||||
|
"NULL_VALUE": 0,
|
||||||
|
}
|
||||||
|
|
||||||
|
func (x NullValue) String() string {
|
||||||
|
return proto.EnumName(NullValue_name, int32(x))
|
||||||
|
}
|
||||||
|
func (NullValue) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
|
||||||
|
func (NullValue) XXX_WellKnownType() string { return "NullValue" }
|
||||||
|
|
||||||
|
// `Struct` represents a structured data value, consisting of fields
|
||||||
|
// which map to dynamically typed values. In some languages, `Struct`
|
||||||
|
// might be supported by a native representation. For example, in
|
||||||
|
// scripting languages like JS a struct is represented as an
|
||||||
|
// object. The details of that representation are described together
|
||||||
|
// with the proto support for the language.
|
||||||
|
//
|
||||||
|
// The JSON representation for `Struct` is JSON object.
|
||||||
|
type Struct struct {
|
||||||
|
// Unordered map of dynamically typed values.
|
||||||
|
Fields map[string]*Value `protobuf:"bytes,1,rep,name=fields" json:"fields,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Struct) Reset() { *m = Struct{} }
|
||||||
|
func (m *Struct) String() string { return proto.CompactTextString(m) }
|
||||||
|
func (*Struct) ProtoMessage() {}
|
||||||
|
func (*Struct) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
|
||||||
|
func (*Struct) XXX_WellKnownType() string { return "Struct" }
|
||||||
|
|
||||||
|
func (m *Struct) GetFields() map[string]*Value {
|
||||||
|
if m != nil {
|
||||||
|
return m.Fields
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// `Value` represents a dynamically typed value which can be either
|
||||||
|
// null, a number, a string, a boolean, a recursive struct value, or a
|
||||||
|
// list of values. A producer of value is expected to set one of that
|
||||||
|
// variants, absence of any variant indicates an error.
|
||||||
|
//
|
||||||
|
// The JSON representation for `Value` is JSON value.
|
||||||
|
type Value struct {
|
||||||
|
// The kind of value.
|
||||||
|
//
|
||||||
|
// Types that are valid to be assigned to Kind:
|
||||||
|
// *Value_NullValue
|
||||||
|
// *Value_NumberValue
|
||||||
|
// *Value_StringValue
|
||||||
|
// *Value_BoolValue
|
||||||
|
// *Value_StructValue
|
||||||
|
// *Value_ListValue
|
||||||
|
Kind isValue_Kind `protobuf_oneof:"kind"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Value) Reset() { *m = Value{} }
|
||||||
|
func (m *Value) String() string { return proto.CompactTextString(m) }
|
||||||
|
func (*Value) ProtoMessage() {}
|
||||||
|
func (*Value) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
|
||||||
|
func (*Value) XXX_WellKnownType() string { return "Value" }
|
||||||
|
|
||||||
|
type isValue_Kind interface {
|
||||||
|
isValue_Kind()
|
||||||
|
}
|
||||||
|
|
||||||
|
type Value_NullValue struct {
|
||||||
|
NullValue NullValue `protobuf:"varint,1,opt,name=null_value,json=nullValue,enum=google.protobuf.NullValue,oneof"`
|
||||||
|
}
|
||||||
|
type Value_NumberValue struct {
|
||||||
|
NumberValue float64 `protobuf:"fixed64,2,opt,name=number_value,json=numberValue,oneof"`
|
||||||
|
}
|
||||||
|
type Value_StringValue struct {
|
||||||
|
StringValue string `protobuf:"bytes,3,opt,name=string_value,json=stringValue,oneof"`
|
||||||
|
}
|
||||||
|
type Value_BoolValue struct {
|
||||||
|
BoolValue bool `protobuf:"varint,4,opt,name=bool_value,json=boolValue,oneof"`
|
||||||
|
}
|
||||||
|
type Value_StructValue struct {
|
||||||
|
StructValue *Struct `protobuf:"bytes,5,opt,name=struct_value,json=structValue,oneof"`
|
||||||
|
}
|
||||||
|
type Value_ListValue struct {
|
||||||
|
ListValue *ListValue `protobuf:"bytes,6,opt,name=list_value,json=listValue,oneof"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (*Value_NullValue) isValue_Kind() {}
|
||||||
|
func (*Value_NumberValue) isValue_Kind() {}
|
||||||
|
func (*Value_StringValue) isValue_Kind() {}
|
||||||
|
func (*Value_BoolValue) isValue_Kind() {}
|
||||||
|
func (*Value_StructValue) isValue_Kind() {}
|
||||||
|
func (*Value_ListValue) isValue_Kind() {}
|
||||||
|
|
||||||
|
func (m *Value) GetKind() isValue_Kind {
|
||||||
|
if m != nil {
|
||||||
|
return m.Kind
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Value) GetNullValue() NullValue {
|
||||||
|
if x, ok := m.GetKind().(*Value_NullValue); ok {
|
||||||
|
return x.NullValue
|
||||||
|
}
|
||||||
|
return NullValue_NULL_VALUE
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Value) GetNumberValue() float64 {
|
||||||
|
if x, ok := m.GetKind().(*Value_NumberValue); ok {
|
||||||
|
return x.NumberValue
|
||||||
|
}
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Value) GetStringValue() string {
|
||||||
|
if x, ok := m.GetKind().(*Value_StringValue); ok {
|
||||||
|
return x.StringValue
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Value) GetBoolValue() bool {
|
||||||
|
if x, ok := m.GetKind().(*Value_BoolValue); ok {
|
||||||
|
return x.BoolValue
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Value) GetStructValue() *Struct {
|
||||||
|
if x, ok := m.GetKind().(*Value_StructValue); ok {
|
||||||
|
return x.StructValue
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *Value) GetListValue() *ListValue {
|
||||||
|
if x, ok := m.GetKind().(*Value_ListValue); ok {
|
||||||
|
return x.ListValue
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// XXX_OneofFuncs is for the internal use of the proto package.
|
||||||
|
func (*Value) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
|
||||||
|
return _Value_OneofMarshaler, _Value_OneofUnmarshaler, _Value_OneofSizer, []interface{}{
|
||||||
|
(*Value_NullValue)(nil),
|
||||||
|
(*Value_NumberValue)(nil),
|
||||||
|
(*Value_StringValue)(nil),
|
||||||
|
(*Value_BoolValue)(nil),
|
||||||
|
(*Value_StructValue)(nil),
|
||||||
|
(*Value_ListValue)(nil),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func _Value_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
|
||||||
|
m := msg.(*Value)
|
||||||
|
// kind
|
||||||
|
switch x := m.Kind.(type) {
|
||||||
|
case *Value_NullValue:
|
||||||
|
b.EncodeVarint(1<<3 | proto.WireVarint)
|
||||||
|
b.EncodeVarint(uint64(x.NullValue))
|
||||||
|
case *Value_NumberValue:
|
||||||
|
b.EncodeVarint(2<<3 | proto.WireFixed64)
|
||||||
|
b.EncodeFixed64(math.Float64bits(x.NumberValue))
|
||||||
|
case *Value_StringValue:
|
||||||
|
b.EncodeVarint(3<<3 | proto.WireBytes)
|
||||||
|
b.EncodeStringBytes(x.StringValue)
|
||||||
|
case *Value_BoolValue:
|
||||||
|
t := uint64(0)
|
||||||
|
if x.BoolValue {
|
||||||
|
t = 1
|
||||||
|
}
|
||||||
|
b.EncodeVarint(4<<3 | proto.WireVarint)
|
||||||
|
b.EncodeVarint(t)
|
||||||
|
case *Value_StructValue:
|
||||||
|
b.EncodeVarint(5<<3 | proto.WireBytes)
|
||||||
|
if err := b.EncodeMessage(x.StructValue); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
case *Value_ListValue:
|
||||||
|
b.EncodeVarint(6<<3 | proto.WireBytes)
|
||||||
|
if err := b.EncodeMessage(x.ListValue); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
case nil:
|
||||||
|
default:
|
||||||
|
return fmt.Errorf("Value.Kind has unexpected type %T", x)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func _Value_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
|
||||||
|
m := msg.(*Value)
|
||||||
|
switch tag {
|
||||||
|
case 1: // kind.null_value
|
||||||
|
if wire != proto.WireVarint {
|
||||||
|
return true, proto.ErrInternalBadWireType
|
||||||
|
}
|
||||||
|
x, err := b.DecodeVarint()
|
||||||
|
m.Kind = &Value_NullValue{NullValue(x)}
|
||||||
|
return true, err
|
||||||
|
case 2: // kind.number_value
|
||||||
|
if wire != proto.WireFixed64 {
|
||||||
|
return true, proto.ErrInternalBadWireType
|
||||||
|
}
|
||||||
|
x, err := b.DecodeFixed64()
|
||||||
|
m.Kind = &Value_NumberValue{math.Float64frombits(x)}
|
||||||
|
return true, err
|
||||||
|
case 3: // kind.string_value
|
||||||
|
if wire != proto.WireBytes {
|
||||||
|
return true, proto.ErrInternalBadWireType
|
||||||
|
}
|
||||||
|
x, err := b.DecodeStringBytes()
|
||||||
|
m.Kind = &Value_StringValue{x}
|
||||||
|
return true, err
|
||||||
|
case 4: // kind.bool_value
|
||||||
|
if wire != proto.WireVarint {
|
||||||
|
return true, proto.ErrInternalBadWireType
|
||||||
|
}
|
||||||
|
x, err := b.DecodeVarint()
|
||||||
|
m.Kind = &Value_BoolValue{x != 0}
|
||||||
|
return true, err
|
||||||
|
case 5: // kind.struct_value
|
||||||
|
if wire != proto.WireBytes {
|
||||||
|
return true, proto.ErrInternalBadWireType
|
||||||
|
}
|
||||||
|
msg := new(Struct)
|
||||||
|
err := b.DecodeMessage(msg)
|
||||||
|
m.Kind = &Value_StructValue{msg}
|
||||||
|
return true, err
|
||||||
|
case 6: // kind.list_value
|
||||||
|
if wire != proto.WireBytes {
|
||||||
|
return true, proto.ErrInternalBadWireType
|
||||||
|
}
|
||||||
|
msg := new(ListValue)
|
||||||
|
err := b.DecodeMessage(msg)
|
||||||
|
m.Kind = &Value_ListValue{msg}
|
||||||
|
return true, err
|
||||||
|
default:
|
||||||
|
return false, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func _Value_OneofSizer(msg proto.Message) (n int) {
|
||||||
|
m := msg.(*Value)
|
||||||
|
// kind
|
||||||
|
switch x := m.Kind.(type) {
|
||||||
|
case *Value_NullValue:
|
||||||
|
n += proto.SizeVarint(1<<3 | proto.WireVarint)
|
||||||
|
n += proto.SizeVarint(uint64(x.NullValue))
|
||||||
|
case *Value_NumberValue:
|
||||||
|
n += proto.SizeVarint(2<<3 | proto.WireFixed64)
|
||||||
|
n += 8
|
||||||
|
case *Value_StringValue:
|
||||||
|
n += proto.SizeVarint(3<<3 | proto.WireBytes)
|
||||||
|
n += proto.SizeVarint(uint64(len(x.StringValue)))
|
||||||
|
n += len(x.StringValue)
|
||||||
|
case *Value_BoolValue:
|
||||||
|
n += proto.SizeVarint(4<<3 | proto.WireVarint)
|
||||||
|
n += 1
|
||||||
|
case *Value_StructValue:
|
||||||
|
s := proto.Size(x.StructValue)
|
||||||
|
n += proto.SizeVarint(5<<3 | proto.WireBytes)
|
||||||
|
n += proto.SizeVarint(uint64(s))
|
||||||
|
n += s
|
||||||
|
case *Value_ListValue:
|
||||||
|
s := proto.Size(x.ListValue)
|
||||||
|
n += proto.SizeVarint(6<<3 | proto.WireBytes)
|
||||||
|
n += proto.SizeVarint(uint64(s))
|
||||||
|
n += s
|
||||||
|
case nil:
|
||||||
|
default:
|
||||||
|
panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
|
||||||
|
}
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
|
||||||
|
// `ListValue` is a wrapper around a repeated field of values.
|
||||||
|
//
|
||||||
|
// The JSON representation for `ListValue` is JSON array.
|
||||||
|
type ListValue struct {
|
||||||
|
// Repeated field of dynamically typed values.
|
||||||
|
Values []*Value `protobuf:"bytes,1,rep,name=values" json:"values,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *ListValue) Reset() { *m = ListValue{} }
|
||||||
|
func (m *ListValue) String() string { return proto.CompactTextString(m) }
|
||||||
|
func (*ListValue) ProtoMessage() {}
|
||||||
|
func (*ListValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
|
||||||
|
func (*ListValue) XXX_WellKnownType() string { return "ListValue" }
|
||||||
|
|
||||||
|
func (m *ListValue) GetValues() []*Value {
|
||||||
|
if m != nil {
|
||||||
|
return m.Values
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
proto.RegisterType((*Struct)(nil), "google.protobuf.Struct")
|
||||||
|
proto.RegisterType((*Value)(nil), "google.protobuf.Value")
|
||||||
|
proto.RegisterType((*ListValue)(nil), "google.protobuf.ListValue")
|
||||||
|
proto.RegisterEnum("google.protobuf.NullValue", NullValue_name, NullValue_value)
|
||||||
|
}
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
proto.RegisterFile("github.com/golang/protobuf/ptypes/struct/struct.proto", fileDescriptor0)
|
||||||
|
}
|
||||||
|
|
||||||
|
var fileDescriptor0 = []byte{
|
||||||
|
// 417 bytes of a gzipped FileDescriptorProto
|
||||||
|
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x92, 0x41, 0x8b, 0xd3, 0x40,
|
||||||
|
0x14, 0x80, 0x3b, 0xc9, 0x36, 0x98, 0x17, 0x59, 0x97, 0x11, 0xb4, 0xac, 0xa0, 0xa1, 0x7b, 0x09,
|
||||||
|
0x22, 0x09, 0x56, 0x04, 0x31, 0x5e, 0x0c, 0xac, 0xbb, 0x60, 0x58, 0x62, 0x74, 0x57, 0xf0, 0x52,
|
||||||
|
0x9a, 0x34, 0x8d, 0xa1, 0xd3, 0x99, 0x90, 0xcc, 0x28, 0x3d, 0xfa, 0x2f, 0x3c, 0x7b, 0xf4, 0xe8,
|
||||||
|
0xaf, 0xf3, 0x28, 0x33, 0x93, 0x44, 0x69, 0x29, 0x78, 0x9a, 0xbe, 0x37, 0xdf, 0xfb, 0xe6, 0xbd,
|
||||||
|
0xd7, 0xc0, 0xf3, 0xb2, 0xe2, 0x9f, 0x45, 0xe6, 0xe7, 0x6c, 0x13, 0x94, 0x8c, 0x2c, 0x68, 0x19,
|
||||||
|
0xd4, 0x0d, 0xe3, 0x2c, 0x13, 0xab, 0xa0, 0xe6, 0xdb, 0xba, 0x68, 0x83, 0x96, 0x37, 0x22, 0xe7,
|
||||||
|
0xdd, 0xe1, 0xab, 0x5b, 0x7c, 0xa7, 0x64, 0xac, 0x24, 0x85, 0xdf, 0xb3, 0xd3, 0xef, 0x08, 0xac,
|
||||||
|
0xf7, 0x8a, 0xc0, 0x21, 0x58, 0xab, 0xaa, 0x20, 0xcb, 0x76, 0x82, 0x5c, 0xd3, 0x73, 0x66, 0x67,
|
||||||
|
0xfe, 0x0e, 0xec, 0x6b, 0xd0, 0x7f, 0xa3, 0xa8, 0x73, 0xca, 0x9b, 0x6d, 0xda, 0x95, 0x9c, 0xbe,
|
||||||
|
0x03, 0xe7, 0x9f, 0x34, 0x3e, 0x01, 0x73, 0x5d, 0x6c, 0x27, 0xc8, 0x45, 0x9e, 0x9d, 0xca, 0x9f,
|
||||||
|
0xf8, 0x09, 0x8c, 0xbf, 0x2c, 0x88, 0x28, 0x26, 0x86, 0x8b, 0x3c, 0x67, 0x76, 0x6f, 0x4f, 0x7e,
|
||||||
|
0x23, 0x6f, 0x53, 0x0d, 0xbd, 0x34, 0x5e, 0xa0, 0xe9, 0x2f, 0x03, 0xc6, 0x2a, 0x89, 0x43, 0x00,
|
||||||
|
0x2a, 0x08, 0x99, 0x6b, 0x81, 0x94, 0x1e, 0xcf, 0x4e, 0xf7, 0x04, 0x57, 0x82, 0x10, 0xc5, 0x5f,
|
||||||
|
0x8e, 0x52, 0x9b, 0xf6, 0x01, 0x3e, 0x83, 0xdb, 0x54, 0x6c, 0xb2, 0xa2, 0x99, 0xff, 0x7d, 0x1f,
|
||||||
|
0x5d, 0x8e, 0x52, 0x47, 0x67, 0x07, 0xa8, 0xe5, 0x4d, 0x45, 0xcb, 0x0e, 0x32, 0x65, 0xe3, 0x12,
|
||||||
|
0xd2, 0x59, 0x0d, 0x3d, 0x02, 0xc8, 0x18, 0xeb, 0xdb, 0x38, 0x72, 0x91, 0x77, 0x4b, 0x3e, 0x25,
|
||||||
|
0x73, 0x1a, 0x78, 0xa5, 0x2c, 0x22, 0xe7, 0x1d, 0x32, 0x56, 0xa3, 0xde, 0x3f, 0xb0, 0xc7, 0x4e,
|
||||||
|
0x2f, 0x72, 0x3e, 0x4c, 0x49, 0xaa, 0xb6, 0xaf, 0xb5, 0x54, 0xed, 0xfe, 0x94, 0x71, 0xd5, 0xf2,
|
||||||
|
0x61, 0x4a, 0xd2, 0x07, 0x91, 0x05, 0x47, 0xeb, 0x8a, 0x2e, 0xa7, 0x21, 0xd8, 0x03, 0x81, 0x7d,
|
||||||
|
0xb0, 0x94, 0xac, 0xff, 0x47, 0x0f, 0x2d, 0xbd, 0xa3, 0x1e, 0x3f, 0x00, 0x7b, 0x58, 0x22, 0x3e,
|
||||||
|
0x06, 0xb8, 0xba, 0x8e, 0xe3, 0xf9, 0xcd, 0xeb, 0xf8, 0xfa, 0xfc, 0x64, 0x14, 0x7d, 0x43, 0x70,
|
||||||
|
0x37, 0x67, 0x9b, 0x5d, 0x45, 0xe4, 0xe8, 0x69, 0x12, 0x19, 0x27, 0xe8, 0xd3, 0xd3, 0xff, 0xfd,
|
||||||
|
0x30, 0x43, 0x7d, 0xd4, 0xd9, 0x6f, 0x84, 0x7e, 0x18, 0xe6, 0x45, 0x12, 0xfd, 0x34, 0x1e, 0x5e,
|
||||||
|
0x68, 0x79, 0xd2, 0xf7, 0xf7, 0xb1, 0x20, 0xe4, 0x2d, 0x65, 0x5f, 0xe9, 0x07, 0x59, 0x99, 0x59,
|
||||||
|
0x4a, 0xf5, 0xec, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x9b, 0x6e, 0x5d, 0x3c, 0xfe, 0x02, 0x00,
|
||||||
|
0x00,
|
||||||
|
}
|
96
vendor/github.com/golang/protobuf/ptypes/struct/struct.proto
generated
vendored
Normal file
|
@ -0,0 +1,96 @@
|
||||||
|
// Protocol Buffers - Google's data interchange format
|
||||||
|
// Copyright 2008 Google Inc. All rights reserved.
|
||||||
|
// https://developers.google.com/protocol-buffers/
|
||||||
|
//
|
||||||
|
// Redistribution and use in source and binary forms, with or without
|
||||||
|
// modification, are permitted provided that the following conditions are
|
||||||
|
// met:
|
||||||
|
//
|
||||||
|
// * Redistributions of source code must retain the above copyright
|
||||||
|
// notice, this list of conditions and the following disclaimer.
|
||||||
|
// * Redistributions in binary form must reproduce the above
|
||||||
|
// copyright notice, this list of conditions and the following disclaimer
|
||||||
|
// in the documentation and/or other materials provided with the
|
||||||
|
// distribution.
|
||||||
|
// * Neither the name of Google Inc. nor the names of its
|
||||||
|
// contributors may be used to endorse or promote products derived from
|
||||||
|
// this software without specific prior written permission.
|
||||||
|
//
|
||||||
|
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||||
|
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||||
|
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||||
|
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||||
|
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||||
|
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||||
|
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||||
|
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||||
|
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||||
|
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
|
||||||
|
syntax = "proto3";
|
||||||
|
|
||||||
|
package google.protobuf;
|
||||||
|
|
||||||
|
option csharp_namespace = "Google.Protobuf.WellKnownTypes";
|
||||||
|
option cc_enable_arenas = true;
|
||||||
|
option go_package = "github.com/golang/protobuf/ptypes/struct;structpb";
|
||||||
|
option java_package = "com.google.protobuf";
|
||||||
|
option java_outer_classname = "StructProto";
|
||||||
|
option java_multiple_files = true;
|
||||||
|
option objc_class_prefix = "GPB";
|
||||||
|
|
||||||
|
|
||||||
|
// `Struct` represents a structured data value, consisting of fields
|
||||||
|
// which map to dynamically typed values. In some languages, `Struct`
|
||||||
|
// might be supported by a native representation. For example, in
|
||||||
|
// scripting languages like JS a struct is represented as an
|
||||||
|
// object. The details of that representation are described together
|
||||||
|
// with the proto support for the language.
|
||||||
|
//
|
||||||
|
// The JSON representation for `Struct` is JSON object.
|
||||||
|
message Struct {
|
||||||
|
// Unordered map of dynamically typed values.
|
||||||
|
map<string, Value> fields = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
// `Value` represents a dynamically typed value which can be either
|
||||||
|
// null, a number, a string, a boolean, a recursive struct value, or a
|
||||||
|
// list of values. A producer of value is expected to set one of that
|
||||||
|
// variants, absence of any variant indicates an error.
|
||||||
|
//
|
||||||
|
// The JSON representation for `Value` is JSON value.
|
||||||
|
message Value {
|
||||||
|
// The kind of value.
|
||||||
|
oneof kind {
|
||||||
|
// Represents a null value.
|
||||||
|
NullValue null_value = 1;
|
||||||
|
// Represents a double value.
|
||||||
|
double number_value = 2;
|
||||||
|
// Represents a string value.
|
||||||
|
string string_value = 3;
|
||||||
|
// Represents a boolean value.
|
||||||
|
bool bool_value = 4;
|
||||||
|
// Represents a structured value.
|
||||||
|
Struct struct_value = 5;
|
||||||
|
// Represents a repeated `Value`.
|
||||||
|
ListValue list_value = 6;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// `NullValue` is a singleton enumeration to represent the null value for the
|
||||||
|
// `Value` type union.
|
||||||
|
//
|
||||||
|
// The JSON representation for `NullValue` is JSON `null`.
|
||||||
|
enum NullValue {
|
||||||
|
// Null value.
|
||||||
|
NULL_VALUE = 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
// `ListValue` is a wrapper around a repeated field of values.
|
||||||
|
//
|
||||||
|
// The JSON representation for `ListValue` is JSON array.
|
||||||
|
message ListValue {
|
||||||
|
// Repeated field of dynamically typed values.
|
||||||
|
repeated Value values = 1;
|
||||||
|
}
|
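A minimal usage sketch (not part of the vendored files) of how the `Struct`/`Value` well-known types defined above are typically populated from Go; the field names and values in the map are illustrative only, and the import alias follows the go_package option:

package main

import (
	"fmt"

	structpb "github.com/golang/protobuf/ptypes/struct"
)

func main() {
	// Each Value carries exactly one member of the "kind" oneof.
	labels := &structpb.Struct{
		Fields: map[string]*structpb.Value{
			"job":     {Kind: &structpb.Value_StringValue{StringValue: "prometheus"}},
			"up":      {Kind: &structpb.Value_BoolValue{BoolValue: true}},
			"retries": {Kind: &structpb.Value_NumberValue{NumberValue: 3}},
		},
	}
	fmt.Println(labels.String())
}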
27
vendor/github.com/grpc-ecosystem/grpc-gateway/LICENSE.txt
generated
vendored
Normal file
|
@ -0,0 +1,27 @@
|
||||||
|
Copyright (c) 2015, Gengo, Inc.
|
||||||
|
All rights reserved.
|
||||||
|
|
||||||
|
Redistribution and use in source and binary forms, with or without modification,
|
||||||
|
are permitted provided that the following conditions are met:
|
||||||
|
|
||||||
|
* Redistributions of source code must retain the above copyright notice,
|
||||||
|
this list of conditions and the following disclaimer.
|
||||||
|
|
||||||
|
* Redistributions in binary form must reproduce the above copyright notice,
|
||||||
|
this list of conditions and the following disclaimer in the documentation
|
||||||
|
and/or other materials provided with the distribution.
|
||||||
|
|
||||||
|
* Neither the name of Gengo, Inc. nor the names of its
|
||||||
|
contributors may be used to endorse or promote products derived from this
|
||||||
|
software without specific prior written permission.
|
||||||
|
|
||||||
|
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||||
|
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||||
|
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||||
|
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
|
||||||
|
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
|
||||||
|
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
|
||||||
|
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
|
||||||
|
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||||
|
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
||||||
|
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
187
vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/context.go
generated
vendored
Normal file
|
@ -0,0 +1,187 @@
|
||||||
|
package runtime
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"net"
|
||||||
|
"net/http"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"golang.org/x/net/context"
|
||||||
|
"google.golang.org/grpc"
|
||||||
|
"google.golang.org/grpc/codes"
|
||||||
|
"google.golang.org/grpc/grpclog"
|
||||||
|
"google.golang.org/grpc/metadata"
|
||||||
|
)
|
||||||
|
|
||||||
|
// MetadataHeaderPrefix is the http prefix that represents custom metadata
|
||||||
|
// parameters to or from a gRPC call.
|
||||||
|
const MetadataHeaderPrefix = "Grpc-Metadata-"
|
||||||
|
|
||||||
|
// MetadataPrefix is the prefix for grpc-gateway supplied custom metadata fields.
|
||||||
|
const MetadataPrefix = "grpcgateway-"
|
||||||
|
|
||||||
|
// MetadataTrailerPrefix is prepended to gRPC metadata as it is converted to
|
||||||
|
// HTTP headers in a response handled by grpc-gateway
|
||||||
|
const MetadataTrailerPrefix = "Grpc-Trailer-"
|
||||||
|
|
||||||
|
const metadataGrpcTimeout = "Grpc-Timeout"
|
||||||
|
|
||||||
|
const xForwardedFor = "X-Forwarded-For"
|
||||||
|
const xForwardedHost = "X-Forwarded-Host"
|
||||||
|
|
||||||
|
var (
|
||||||
|
// DefaultContextTimeout is used for gRPC call context.WithTimeout whenever a Grpc-Timeout inbound
|
||||||
|
// header isn't present. If the value is 0 the sent `context` will not have a timeout.
|
||||||
|
DefaultContextTimeout = 0 * time.Second
|
||||||
|
)
|
||||||
|
|
||||||
|
/*
|
||||||
|
AnnotateContext adds context information such as metadata from the request.
|
||||||
|
|
||||||
|
At a minimum, the RemoteAddr is included in the fashion of "X-Forwarded-For",
|
||||||
|
except that the forwarded destination is not another HTTP service but rather
|
||||||
|
a gRPC service.
|
||||||
|
*/
|
||||||
|
func AnnotateContext(ctx context.Context, req *http.Request) (context.Context, error) {
|
||||||
|
var pairs []string
|
||||||
|
timeout := DefaultContextTimeout
|
||||||
|
if tm := req.Header.Get(metadataGrpcTimeout); tm != "" {
|
||||||
|
var err error
|
||||||
|
timeout, err = timeoutDecode(tm)
|
||||||
|
if err != nil {
|
||||||
|
return nil, grpc.Errorf(codes.InvalidArgument, "invalid grpc-timeout: %s", tm)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for key, vals := range req.Header {
|
||||||
|
for _, val := range vals {
|
||||||
|
// For backwards-compatibility, pass through 'authorization' header with no prefix.
|
||||||
|
if strings.ToLower(key) == "authorization" {
|
||||||
|
pairs = append(pairs, "authorization", val)
|
||||||
|
}
|
||||||
|
if isPermanentHTTPHeader(key) {
|
||||||
|
pairs = append(pairs, strings.ToLower(fmt.Sprintf("%s%s", MetadataPrefix, key)), val)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if strings.HasPrefix(key, MetadataHeaderPrefix) {
|
||||||
|
pairs = append(pairs, key[len(MetadataHeaderPrefix):], val)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if host := req.Header.Get(xForwardedHost); host != "" {
|
||||||
|
pairs = append(pairs, strings.ToLower(xForwardedHost), host)
|
||||||
|
} else if req.Host != "" {
|
||||||
|
pairs = append(pairs, strings.ToLower(xForwardedHost), req.Host)
|
||||||
|
}
|
||||||
|
|
||||||
|
if addr := req.RemoteAddr; addr != "" {
|
||||||
|
if remoteIP, _, err := net.SplitHostPort(addr); err == nil {
|
||||||
|
if fwd := req.Header.Get(xForwardedFor); fwd == "" {
|
||||||
|
pairs = append(pairs, strings.ToLower(xForwardedFor), remoteIP)
|
||||||
|
} else {
|
||||||
|
pairs = append(pairs, strings.ToLower(xForwardedFor), fmt.Sprintf("%s, %s", fwd, remoteIP))
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
grpclog.Printf("invalid remote addr: %s", addr)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if timeout != 0 {
|
||||||
|
ctx, _ = context.WithTimeout(ctx, timeout)
|
||||||
|
}
|
||||||
|
if len(pairs) == 0 {
|
||||||
|
return ctx, nil
|
||||||
|
}
|
||||||
|
return metadata.NewContext(ctx, metadata.Pairs(pairs...)), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// ServerMetadata consists of metadata sent from gRPC server.
|
||||||
|
type ServerMetadata struct {
|
||||||
|
HeaderMD metadata.MD
|
||||||
|
TrailerMD metadata.MD
|
||||||
|
}
|
||||||
|
|
||||||
|
type serverMetadataKey struct{}
|
||||||
|
|
||||||
|
// NewServerMetadataContext creates a new context with ServerMetadata
|
||||||
|
func NewServerMetadataContext(ctx context.Context, md ServerMetadata) context.Context {
|
||||||
|
return context.WithValue(ctx, serverMetadataKey{}, md)
|
||||||
|
}
|
||||||
|
|
||||||
|
// ServerMetadataFromContext returns the ServerMetadata in ctx
|
||||||
|
func ServerMetadataFromContext(ctx context.Context) (md ServerMetadata, ok bool) {
|
||||||
|
md, ok = ctx.Value(serverMetadataKey{}).(ServerMetadata)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func timeoutDecode(s string) (time.Duration, error) {
|
||||||
|
size := len(s)
|
||||||
|
if size < 2 {
|
||||||
|
return 0, fmt.Errorf("timeout string is too short: %q", s)
|
||||||
|
}
|
||||||
|
d, ok := timeoutUnitToDuration(s[size-1])
|
||||||
|
if !ok {
|
||||||
|
return 0, fmt.Errorf("timeout unit is not recognized: %q", s)
|
||||||
|
}
|
||||||
|
t, err := strconv.ParseInt(s[:size-1], 10, 64)
|
||||||
|
if err != nil {
|
||||||
|
return 0, err
|
||||||
|
}
|
||||||
|
return d * time.Duration(t), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func timeoutUnitToDuration(u uint8) (d time.Duration, ok bool) {
|
||||||
|
switch u {
|
||||||
|
case 'H':
|
||||||
|
return time.Hour, true
|
||||||
|
case 'M':
|
||||||
|
return time.Minute, true
|
||||||
|
case 'S':
|
||||||
|
return time.Second, true
|
||||||
|
case 'm':
|
||||||
|
return time.Millisecond, true
|
||||||
|
case 'u':
|
||||||
|
return time.Microsecond, true
|
||||||
|
case 'n':
|
||||||
|
return time.Nanosecond, true
|
||||||
|
default:
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// isPermanentHTTPHeader checks whether hdr belongs to the list of
|
||||||
|
// permenant request headers maintained by IANA.
|
||||||
|
// http://www.iana.org/assignments/message-headers/message-headers.xml
|
||||||
|
func isPermanentHTTPHeader(hdr string) bool {
|
||||||
|
switch hdr {
|
||||||
|
case
|
||||||
|
"Accept",
|
||||||
|
"Accept-Charset",
|
||||||
|
"Accept-Language",
|
||||||
|
"Accept-Ranges",
|
||||||
|
"Authorization",
|
||||||
|
"Cache-Control",
|
||||||
|
"Content-Type",
|
||||||
|
"Cookie",
|
||||||
|
"Date",
|
||||||
|
"Expect",
|
||||||
|
"From",
|
||||||
|
"Host",
|
||||||
|
"If-Match",
|
||||||
|
"If-Modified-Since",
|
||||||
|
"If-None-Match",
|
||||||
|
"If-Schedule-Tag-Match",
|
||||||
|
"If-Unmodified-Since",
|
||||||
|
"Max-Forwards",
|
||||||
|
"Origin",
|
||||||
|
"Pragma",
|
||||||
|
"Referer",
|
||||||
|
"User-Agent",
|
||||||
|
"Via",
|
||||||
|
"Warning":
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
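A usage sketch (not part of the vendored file) of AnnotateContext above; the URL and header values are illustrative, and "250m" means 250 milliseconds per timeoutUnitToDuration:

package main

import (
	"fmt"
	"net/http"

	"github.com/grpc-ecosystem/grpc-gateway/runtime"
	"golang.org/x/net/context"
)

func main() {
	req, _ := http.NewRequest("GET", "http://example.com/api/v1/query", nil)
	req.Header.Set("Grpc-Metadata-Trace-Id", "abc123") // forwarded as "trace-id" metadata, prefix stripped
	req.Header.Set("Grpc-Timeout", "250m")             // 250 milliseconds

	ctx, err := runtime.AnnotateContext(context.Background(), req)
	if err != nil {
		panic(err)
	}
	if deadline, ok := ctx.Deadline(); ok {
		fmt.Println("deadline derived from Grpc-Timeout:", deadline)
	}
}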
58
vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/convert.go
generated
vendored
Normal file
|
@ -0,0 +1,58 @@
package runtime

import (
	"strconv"
)

// String just returns the given string.
// It is just for compatibility with other types.
func String(val string) (string, error) {
	return val, nil
}

// Bool converts the given string representation of a boolean value into bool.
func Bool(val string) (bool, error) {
	return strconv.ParseBool(val)
}

// Float64 converts the given string representation of a floating point number into float64.
func Float64(val string) (float64, error) {
	return strconv.ParseFloat(val, 64)
}

// Float32 converts the given string representation of a floating point number into float32.
func Float32(val string) (float32, error) {
	f, err := strconv.ParseFloat(val, 32)
	if err != nil {
		return 0, err
	}
	return float32(f), nil
}

// Int64 converts the given string representation of an integer into int64.
func Int64(val string) (int64, error) {
	return strconv.ParseInt(val, 0, 64)
}

// Int32 converts the given string representation of an integer into int32.
func Int32(val string) (int32, error) {
	i, err := strconv.ParseInt(val, 0, 32)
	if err != nil {
		return 0, err
	}
	return int32(i), nil
}

// Uint64 converts the given string representation of an integer into uint64.
func Uint64(val string) (uint64, error) {
	return strconv.ParseUint(val, 0, 64)
}

// Uint32 converts the given string representation of an integer into uint32.
func Uint32(val string) (uint32, error) {
	i, err := strconv.ParseUint(val, 0, 32)
	if err != nil {
		return 0, err
	}
	return uint32(i), nil
}
5
vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/doc.go
generated
vendored
Normal file
|
@ -0,0 +1,5 @@
/*
Package runtime contains runtime helper functions used by
servers which protoc-gen-grpc-gateway generates.
*/
package runtime
121
vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/errors.go
generated
vendored
Normal file
|
@ -0,0 +1,121 @@
|
||||||
|
package runtime
|
||||||
|
|
||||||
|
import (
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
|
||||||
|
"github.com/golang/protobuf/proto"
|
||||||
|
"golang.org/x/net/context"
|
||||||
|
"google.golang.org/grpc"
|
||||||
|
"google.golang.org/grpc/codes"
|
||||||
|
"google.golang.org/grpc/grpclog"
|
||||||
|
)
|
||||||
|
|
||||||
|
// HTTPStatusFromCode converts a gRPC error code into the corresponding HTTP response status.
|
||||||
|
func HTTPStatusFromCode(code codes.Code) int {
|
||||||
|
switch code {
|
||||||
|
case codes.OK:
|
||||||
|
return http.StatusOK
|
||||||
|
case codes.Canceled:
|
||||||
|
return http.StatusRequestTimeout
|
||||||
|
case codes.Unknown:
|
||||||
|
return http.StatusInternalServerError
|
||||||
|
case codes.InvalidArgument:
|
||||||
|
return http.StatusBadRequest
|
||||||
|
case codes.DeadlineExceeded:
|
||||||
|
return http.StatusRequestTimeout
|
||||||
|
case codes.NotFound:
|
||||||
|
return http.StatusNotFound
|
||||||
|
case codes.AlreadyExists:
|
||||||
|
return http.StatusConflict
|
||||||
|
case codes.PermissionDenied:
|
||||||
|
return http.StatusForbidden
|
||||||
|
case codes.Unauthenticated:
|
||||||
|
return http.StatusUnauthorized
|
||||||
|
case codes.ResourceExhausted:
|
||||||
|
return http.StatusForbidden
|
||||||
|
case codes.FailedPrecondition:
|
||||||
|
return http.StatusPreconditionFailed
|
||||||
|
case codes.Aborted:
|
||||||
|
return http.StatusConflict
|
||||||
|
case codes.OutOfRange:
|
||||||
|
return http.StatusBadRequest
|
||||||
|
case codes.Unimplemented:
|
||||||
|
return http.StatusNotImplemented
|
||||||
|
case codes.Internal:
|
||||||
|
return http.StatusInternalServerError
|
||||||
|
case codes.Unavailable:
|
||||||
|
return http.StatusServiceUnavailable
|
||||||
|
case codes.DataLoss:
|
||||||
|
return http.StatusInternalServerError
|
||||||
|
}
|
||||||
|
|
||||||
|
grpclog.Printf("Unknown gRPC error code: %v", code)
|
||||||
|
return http.StatusInternalServerError
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
// HTTPError replies to the request with the error.
|
||||||
|
// You can set a custom function to this variable to customize error format.
|
||||||
|
HTTPError = DefaultHTTPError
|
||||||
|
// OtherErrorHandler handles the following error used by the gateway: StatusMethodNotAllowed StatusNotFound and StatusBadRequest
|
||||||
|
OtherErrorHandler = DefaultOtherErrorHandler
|
||||||
|
)
|
||||||
|
|
||||||
|
type errorBody struct {
|
||||||
|
Error string `protobuf:"bytes,1,name=error" json:"error"`
|
||||||
|
Code int32 `protobuf:"varint,2,name=code" json:"code"`
|
||||||
|
}
|
||||||
|
|
||||||
|
//Make this also conform to proto.Message for builtin JSONPb Marshaler
|
||||||
|
func (e *errorBody) Reset() { *e = errorBody{} }
|
||||||
|
func (e *errorBody) String() string { return proto.CompactTextString(e) }
|
||||||
|
func (*errorBody) ProtoMessage() {}
|
||||||
|
|
||||||
|
// DefaultHTTPError is the default implementation of HTTPError.
|
||||||
|
// If "err" is an error from gRPC system, the function replies with the status code mapped by HTTPStatusFromCode.
|
||||||
|
// If otherwise, it replies with http.StatusInternalServerError.
|
||||||
|
//
|
||||||
|
// The response body returned by this function is a JSON object,
|
||||||
|
// which contains a member whose key is "error" and whose value is err.Error().
|
||||||
|
func DefaultHTTPError(ctx context.Context, marshaler Marshaler, w http.ResponseWriter, _ *http.Request, err error) {
|
||||||
|
const fallback = `{"error": "failed to marshal error message"}`
|
||||||
|
|
||||||
|
w.Header().Del("Trailer")
|
||||||
|
w.Header().Set("Content-Type", marshaler.ContentType())
|
||||||
|
body := &errorBody{
|
||||||
|
Error: grpc.ErrorDesc(err),
|
||||||
|
Code: int32(grpc.Code(err)),
|
||||||
|
}
|
||||||
|
|
||||||
|
buf, merr := marshaler.Marshal(body)
|
||||||
|
if merr != nil {
|
||||||
|
grpclog.Printf("Failed to marshal error message %q: %v", body, merr)
|
||||||
|
w.WriteHeader(http.StatusInternalServerError)
|
||||||
|
if _, err := io.WriteString(w, fallback); err != nil {
|
||||||
|
grpclog.Printf("Failed to write response: %v", err)
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
md, ok := ServerMetadataFromContext(ctx)
|
||||||
|
if !ok {
|
||||||
|
grpclog.Printf("Failed to extract ServerMetadata from context")
|
||||||
|
}
|
||||||
|
|
||||||
|
handleForwardResponseServerMetadata(w, md)
|
||||||
|
handleForwardResponseTrailerHeader(w, md)
|
||||||
|
st := HTTPStatusFromCode(grpc.Code(err))
|
||||||
|
w.WriteHeader(st)
|
||||||
|
if _, err := w.Write(buf); err != nil {
|
||||||
|
grpclog.Printf("Failed to write response: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
handleForwardResponseTrailer(w, md)
|
||||||
|
}
|
||||||
|
|
||||||
|
// DefaultOtherErrorHandler is the default implementation of OtherErrorHandler.
|
||||||
|
// It simply writes a string representation of the given error into "w".
|
||||||
|
func DefaultOtherErrorHandler(w http.ResponseWriter, _ *http.Request, msg string, code int) {
|
||||||
|
http.Error(w, msg, code)
|
||||||
|
}
|
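A sketch (not part of the vendored file) of overriding the package-level HTTPError hook described above so gRPC errors are rendered as plain text; the choice of plain text is illustrative only:

package main

import (
	"net/http"

	"github.com/grpc-ecosystem/grpc-gateway/runtime"
	"golang.org/x/net/context"
	"google.golang.org/grpc"
)

func init() {
	// Replace the default JSON error body with a plain-text response.
	runtime.HTTPError = func(ctx context.Context, m runtime.Marshaler, w http.ResponseWriter, r *http.Request, err error) {
		http.Error(w, grpc.ErrorDesc(err), runtime.HTTPStatusFromCode(grpc.Code(err)))
	}
}

func main() {}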
164
vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/handler.go
generated
vendored
Normal file
|
@ -0,0 +1,164 @@
|
||||||
|
package runtime
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"net/http"
|
||||||
|
"net/textproto"
|
||||||
|
|
||||||
|
"github.com/golang/protobuf/proto"
|
||||||
|
"github.com/grpc-ecosystem/grpc-gateway/runtime/internal"
|
||||||
|
"golang.org/x/net/context"
|
||||||
|
"google.golang.org/grpc"
|
||||||
|
"google.golang.org/grpc/grpclog"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ForwardResponseStream forwards the stream from gRPC server to REST client.
|
||||||
|
func ForwardResponseStream(ctx context.Context, marshaler Marshaler, w http.ResponseWriter, req *http.Request, recv func() (proto.Message, error), opts ...func(context.Context, http.ResponseWriter, proto.Message) error) {
|
||||||
|
f, ok := w.(http.Flusher)
|
||||||
|
if !ok {
|
||||||
|
grpclog.Printf("Flush not supported in %T", w)
|
||||||
|
http.Error(w, "unexpected type of web server", http.StatusInternalServerError)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
md, ok := ServerMetadataFromContext(ctx)
|
||||||
|
if !ok {
|
||||||
|
grpclog.Printf("Failed to extract ServerMetadata from context")
|
||||||
|
http.Error(w, "unexpected error", http.StatusInternalServerError)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
handleForwardResponseServerMetadata(w, md)
|
||||||
|
|
||||||
|
w.Header().Set("Transfer-Encoding", "chunked")
|
||||||
|
w.Header().Set("Content-Type", marshaler.ContentType())
|
||||||
|
if err := handleForwardResponseOptions(ctx, w, nil, opts); err != nil {
|
||||||
|
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
w.WriteHeader(http.StatusOK)
|
||||||
|
f.Flush()
|
||||||
|
for {
|
||||||
|
resp, err := recv()
|
||||||
|
if err == io.EOF {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
handleForwardResponseStreamError(marshaler, w, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if err := handleForwardResponseOptions(ctx, w, resp, opts); err != nil {
|
||||||
|
handleForwardResponseStreamError(marshaler, w, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
buf, err := marshaler.Marshal(streamChunk(resp, nil))
|
||||||
|
if err != nil {
|
||||||
|
grpclog.Printf("Failed to marshal response chunk: %v", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if _, err = fmt.Fprintf(w, "%s\n", buf); err != nil {
|
||||||
|
grpclog.Printf("Failed to send response chunk: %v", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
f.Flush()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func handleForwardResponseServerMetadata(w http.ResponseWriter, md ServerMetadata) {
|
||||||
|
for k, vs := range md.HeaderMD {
|
||||||
|
hKey := fmt.Sprintf("%s%s", MetadataHeaderPrefix, k)
|
||||||
|
for i := range vs {
|
||||||
|
w.Header().Add(hKey, vs[i])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func handleForwardResponseTrailerHeader(w http.ResponseWriter, md ServerMetadata) {
|
||||||
|
for k := range md.TrailerMD {
|
||||||
|
tKey := textproto.CanonicalMIMEHeaderKey(fmt.Sprintf("%s%s", MetadataTrailerPrefix, k))
|
||||||
|
w.Header().Add("Trailer", tKey)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func handleForwardResponseTrailer(w http.ResponseWriter, md ServerMetadata) {
|
||||||
|
for k, vs := range md.TrailerMD {
|
||||||
|
tKey := fmt.Sprintf("%s%s", MetadataTrailerPrefix, k)
|
||||||
|
for i := range vs {
|
||||||
|
w.Header().Add(tKey, vs[i])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ForwardResponseMessage forwards the message "resp" from gRPC server to REST client.
|
||||||
|
func ForwardResponseMessage(ctx context.Context, marshaler Marshaler, w http.ResponseWriter, req *http.Request, resp proto.Message, opts ...func(context.Context, http.ResponseWriter, proto.Message) error) {
|
||||||
|
md, ok := ServerMetadataFromContext(ctx)
|
||||||
|
if !ok {
|
||||||
|
grpclog.Printf("Failed to extract ServerMetadata from context")
|
||||||
|
}
|
||||||
|
|
||||||
|
handleForwardResponseServerMetadata(w, md)
|
||||||
|
handleForwardResponseTrailerHeader(w, md)
|
||||||
|
w.Header().Set("Content-Type", marshaler.ContentType())
|
||||||
|
if err := handleForwardResponseOptions(ctx, w, resp, opts); err != nil {
|
||||||
|
HTTPError(ctx, marshaler, w, req, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
buf, err := marshaler.Marshal(resp)
|
||||||
|
if err != nil {
|
||||||
|
grpclog.Printf("Marshal error: %v", err)
|
||||||
|
HTTPError(ctx, marshaler, w, req, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, err = w.Write(buf); err != nil {
|
||||||
|
grpclog.Printf("Failed to write response: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
handleForwardResponseTrailer(w, md)
|
||||||
|
}
|
||||||
|
|
||||||
|
func handleForwardResponseOptions(ctx context.Context, w http.ResponseWriter, resp proto.Message, opts []func(context.Context, http.ResponseWriter, proto.Message) error) error {
|
||||||
|
if len(opts) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
for _, opt := range opts {
|
||||||
|
if err := opt(ctx, w, resp); err != nil {
|
||||||
|
grpclog.Printf("Error handling ForwardResponseOptions: %v", err)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func handleForwardResponseStreamError(marshaler Marshaler, w http.ResponseWriter, err error) {
|
||||||
|
buf, merr := marshaler.Marshal(streamChunk(nil, err))
|
||||||
|
if merr != nil {
|
||||||
|
grpclog.Printf("Failed to marshal an error: %v", merr)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if _, werr := fmt.Fprintf(w, "%s\n", buf); werr != nil {
|
||||||
|
grpclog.Printf("Failed to notify error to client: %v", werr)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func streamChunk(result proto.Message, err error) map[string]proto.Message {
|
||||||
|
if err != nil {
|
||||||
|
grpcCode := grpc.Code(err)
|
||||||
|
httpCode := HTTPStatusFromCode(grpcCode)
|
||||||
|
return map[string]proto.Message{
|
||||||
|
"error": &internal.StreamError{
|
||||||
|
GrpcCode: int32(grpcCode),
|
||||||
|
HttpCode: int32(httpCode),
|
||||||
|
Message: err.Error(),
|
||||||
|
HttpStatus: http.StatusText(httpCode),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if result == nil {
|
||||||
|
return streamChunk(nil, fmt.Errorf("empty response"))
|
||||||
|
}
|
||||||
|
return map[string]proto.Message{"result": result}
|
||||||
|
}
|
93
vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/internal/stream_chunk.pb.go
generated
vendored
Normal file
|
@ -0,0 +1,93 @@
|
||||||
|
// Code generated by protoc-gen-go.
|
||||||
|
// source: runtime/internal/stream_chunk.proto
|
||||||
|
// DO NOT EDIT!
|
||||||
|
|
||||||
|
/*
|
||||||
|
Package internal is a generated protocol buffer package.
|
||||||
|
|
||||||
|
It is generated from these files:
|
||||||
|
runtime/internal/stream_chunk.proto
|
||||||
|
|
||||||
|
It has these top-level messages:
|
||||||
|
StreamError
|
||||||
|
*/
|
||||||
|
package internal
|
||||||
|
|
||||||
|
import proto "github.com/golang/protobuf/proto"
|
||||||
|
import fmt "fmt"
|
||||||
|
import math "math"
|
||||||
|
|
||||||
|
// Reference imports to suppress errors if they are not otherwise used.
|
||||||
|
var _ = proto.Marshal
|
||||||
|
var _ = fmt.Errorf
|
||||||
|
var _ = math.Inf
|
||||||
|
|
||||||
|
// This is a compile-time assertion to ensure that this generated file
|
||||||
|
// is compatible with the proto package it is being compiled against.
|
||||||
|
// A compilation error at this line likely means your copy of the
|
||||||
|
// proto package needs to be updated.
|
||||||
|
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
|
||||||
|
|
||||||
|
// StreamError is a response type which is returned when
|
||||||
|
// streaming rpc returns an error.
|
||||||
|
type StreamError struct {
|
||||||
|
GrpcCode int32 `protobuf:"varint,1,opt,name=grpc_code,json=grpcCode" json:"grpc_code,omitempty"`
|
||||||
|
HttpCode int32 `protobuf:"varint,2,opt,name=http_code,json=httpCode" json:"http_code,omitempty"`
|
||||||
|
Message string `protobuf:"bytes,3,opt,name=message" json:"message,omitempty"`
|
||||||
|
HttpStatus string `protobuf:"bytes,4,opt,name=http_status,json=httpStatus" json:"http_status,omitempty"`
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *StreamError) Reset() { *m = StreamError{} }
|
||||||
|
func (m *StreamError) String() string { return proto.CompactTextString(m) }
|
||||||
|
func (*StreamError) ProtoMessage() {}
|
||||||
|
func (*StreamError) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
|
||||||
|
|
||||||
|
func (m *StreamError) GetGrpcCode() int32 {
|
||||||
|
if m != nil {
|
||||||
|
return m.GrpcCode
|
||||||
|
}
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *StreamError) GetHttpCode() int32 {
|
||||||
|
if m != nil {
|
||||||
|
return m.HttpCode
|
||||||
|
}
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *StreamError) GetMessage() string {
|
||||||
|
if m != nil {
|
||||||
|
return m.Message
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *StreamError) GetHttpStatus() string {
|
||||||
|
if m != nil {
|
||||||
|
return m.HttpStatus
|
||||||
|
}
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
proto.RegisterType((*StreamError)(nil), "grpc.gateway.runtime.StreamError")
|
||||||
|
}
|
||||||
|
|
||||||
|
func init() { proto.RegisterFile("runtime/internal/stream_chunk.proto", fileDescriptor0) }
|
||||||
|
|
||||||
|
var fileDescriptor0 = []byte{
|
||||||
|
// 181 bytes of a gzipped FileDescriptorProto
|
||||||
|
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x34, 0x8e, 0xbf, 0xee, 0x82, 0x30,
|
||||||
|
0x14, 0x85, 0xd3, 0xdf, 0x1f, 0x85, 0xcb, 0x46, 0x1c, 0x9a, 0x38, 0x48, 0x74, 0x61, 0x82, 0xc1,
|
||||||
|
0x37, 0xd0, 0xf8, 0x02, 0xb0, 0xb9, 0x90, 0x0a, 0x37, 0x40, 0x94, 0x96, 0xdc, 0x5e, 0x62, 0x5c,
|
||||||
|
0x7d, 0x72, 0xd3, 0x22, 0xe3, 0xf9, 0xbe, 0x73, 0x92, 0x03, 0x07, 0x9a, 0x34, 0xf7, 0x03, 0xe6,
|
||||||
|
0xbd, 0x66, 0x24, 0xad, 0x1e, 0xb9, 0x65, 0x42, 0x35, 0x54, 0x75, 0x37, 0xe9, 0x7b, 0x36, 0x92,
|
||||||
|
0x61, 0x13, 0x6f, 0x5a, 0x1a, 0xeb, 0xac, 0x55, 0x8c, 0x4f, 0xf5, 0xca, 0xbe, 0x8b, 0xfd, 0x5b,
|
||||||
|
0x40, 0x54, 0xfa, 0xf2, 0x85, 0xc8, 0x50, 0xbc, 0x85, 0xd0, 0xf5, 0xaa, 0xda, 0x34, 0x28, 0x45,
|
||||||
|
0x22, 0xd2, 0xff, 0x22, 0x70, 0xe0, 0x6c, 0x1a, 0x74, 0xb2, 0x63, 0x1e, 0x67, 0xf9, 0x33, 0x4b,
|
||||||
|
0x07, 0xbc, 0x94, 0xb0, 0x1e, 0xd0, 0x5a, 0xd5, 0xa2, 0xfc, 0x4d, 0x44, 0x1a, 0x16, 0x4b, 0x8c,
|
||||||
|
0x77, 0x10, 0xf9, 0x99, 0x65, 0xc5, 0x93, 0x95, 0x7f, 0xde, 0x82, 0x43, 0xa5, 0x27, 0x27, 0xb8,
|
||||||
|
0x06, 0xcb, 0xf3, 0xdb, 0xca, 0xbf, 0x3d, 0x7e, 0x02, 0x00, 0x00, 0xff, 0xff, 0xa9, 0x07, 0x92,
|
||||||
|
0xb6, 0xd4, 0x00, 0x00, 0x00,
|
||||||
|
}
|
12
vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/internal/stream_chunk.proto
generated
vendored
Normal file
|
@ -0,0 +1,12 @@
syntax = "proto3";
package grpc.gateway.runtime;
option go_package = "internal";

// StreamError is a response type which is returned when
// streaming rpc returns an error.
message StreamError {
	int32 grpc_code = 1;
	int32 http_code = 2;
	string message = 3;
	string http_status = 4;
}
37
vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_json.go
generated
vendored
Normal file
|
@ -0,0 +1,37 @@
package runtime

import (
	"encoding/json"
	"io"
)

// JSONBuiltin is a Marshaler which marshals/unmarshals into/from JSON
// with the standard "encoding/json" package of Golang.
// Although it is generally faster for simple proto messages than JSONPb,
// it does not support advanced features of protobuf, e.g. map, oneof, ....
type JSONBuiltin struct{}

// ContentType always returns "application/json".
func (*JSONBuiltin) ContentType() string {
	return "application/json"
}

// Marshal marshals "v" into JSON
func (j *JSONBuiltin) Marshal(v interface{}) ([]byte, error) {
	return json.Marshal(v)
}

// Unmarshal unmarshals JSON data into "v".
func (j *JSONBuiltin) Unmarshal(data []byte, v interface{}) error {
	return json.Unmarshal(data, v)
}

// NewDecoder returns a Decoder which reads JSON stream from "r".
func (j *JSONBuiltin) NewDecoder(r io.Reader) Decoder {
	return json.NewDecoder(r)
}

// NewEncoder returns an Encoder which writes JSON stream into "w".
func (j *JSONBuiltin) NewEncoder(w io.Writer) Encoder {
	return json.NewEncoder(w)
}
184
vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_jsonpb.go
generated
vendored
Normal file
|
@ -0,0 +1,184 @@
|
||||||
|
package runtime
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"io"
|
||||||
|
"reflect"
|
||||||
|
|
||||||
|
"github.com/golang/protobuf/jsonpb"
|
||||||
|
"github.com/golang/protobuf/proto"
|
||||||
|
)
|
||||||
|
|
||||||
|
// JSONPb is a Marshaler which marshals/unmarshals into/from JSON
|
||||||
|
// with the "github.com/golang/protobuf/jsonpb".
|
||||||
|
// It supports fully functionality of protobuf unlike JSONBuiltin.
|
||||||
|
type JSONPb jsonpb.Marshaler
|
||||||
|
|
||||||
|
// ContentType always returns "application/json".
|
||||||
|
func (*JSONPb) ContentType() string {
|
||||||
|
return "application/json"
|
||||||
|
}
|
||||||
|
|
||||||
|
// Marshal marshals "v" into JSON
|
||||||
|
// Currently it can marshal only proto.Message.
|
||||||
|
// TODO(yugui) Support fields of primitive types in a message.
|
||||||
|
func (j *JSONPb) Marshal(v interface{}) ([]byte, error) {
|
||||||
|
if _, ok := v.(proto.Message); !ok {
|
||||||
|
return j.marshalNonProtoField(v)
|
||||||
|
}
|
||||||
|
|
||||||
|
var buf bytes.Buffer
|
||||||
|
if err := j.marshalTo(&buf, v); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return buf.Bytes(), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (j *JSONPb) marshalTo(w io.Writer, v interface{}) error {
|
||||||
|
p, ok := v.(proto.Message)
|
||||||
|
if !ok {
|
||||||
|
buf, err := j.marshalNonProtoField(v)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
_, err = w.Write(buf)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return (*jsonpb.Marshaler)(j).Marshal(w, p)
|
||||||
|
}
|
||||||
|
|
||||||
|
// marshalNonProto marshals a non-message field of a protobuf message.
|
||||||
|
// This function does not correctly marshals arbitary data structure into JSON,
|
||||||
|
// but it is only capable of marshaling non-message field values of protobuf,
|
||||||
|
// i.e. primitive types, enums; pointers to primitives or enums; maps from
|
||||||
|
// integer/string types to primitives/enums/pointers to messages.
|
||||||
|
func (j *JSONPb) marshalNonProtoField(v interface{}) ([]byte, error) {
|
||||||
|
rv := reflect.ValueOf(v)
|
||||||
|
for rv.Kind() == reflect.Ptr {
|
||||||
|
if rv.IsNil() {
|
||||||
|
return []byte("null"), nil
|
||||||
|
}
|
||||||
|
rv = rv.Elem()
|
||||||
|
}
|
||||||
|
|
||||||
|
if rv.Kind() == reflect.Map {
|
||||||
|
m := make(map[string]*json.RawMessage)
|
||||||
|
for _, k := range rv.MapKeys() {
|
||||||
|
buf, err := j.Marshal(rv.MapIndex(k).Interface())
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
m[fmt.Sprintf("%v", k.Interface())] = (*json.RawMessage)(&buf)
|
||||||
|
}
|
||||||
|
if j.Indent != "" {
|
||||||
|
return json.MarshalIndent(m, "", j.Indent)
|
||||||
|
}
|
||||||
|
return json.Marshal(m)
|
||||||
|
}
|
||||||
|
if enum, ok := rv.Interface().(protoEnum); ok && !j.EnumsAsInts {
|
||||||
|
return json.Marshal(enum.String())
|
||||||
|
}
|
||||||
|
return json.Marshal(rv.Interface())
|
||||||
|
}
|
||||||
|
|
||||||
|
// Unmarshal unmarshals JSON "data" into "v"
|
||||||
|
// Currently it can marshal only proto.Message.
|
||||||
|
// TODO(yugui) Support fields of primitive types in a message.
|
||||||
|
func (j *JSONPb) Unmarshal(data []byte, v interface{}) error {
|
||||||
|
return unmarshalJSONPb(data, v)
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewDecoder returns a Decoder which reads JSON stream from "r".
|
||||||
|
func (j *JSONPb) NewDecoder(r io.Reader) Decoder {
|
||||||
|
d := json.NewDecoder(r)
|
||||||
|
return DecoderFunc(func(v interface{}) error { return decodeJSONPb(d, v) })
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewEncoder returns an Encoder which writes JSON stream into "w".
|
||||||
|
func (j *JSONPb) NewEncoder(w io.Writer) Encoder {
|
||||||
|
return EncoderFunc(func(v interface{}) error { return j.marshalTo(w, v) })
|
||||||
|
}
|
||||||
|
|
||||||
|
func unmarshalJSONPb(data []byte, v interface{}) error {
|
||||||
|
d := json.NewDecoder(bytes.NewReader(data))
|
||||||
|
return decodeJSONPb(d, v)
|
||||||
|
}
|
||||||
|
|
||||||
|
func decodeJSONPb(d *json.Decoder, v interface{}) error {
|
||||||
|
p, ok := v.(proto.Message)
|
||||||
|
if !ok {
|
||||||
|
return decodeNonProtoField(d, v)
|
||||||
|
}
|
||||||
|
unmarshaler := &jsonpb.Unmarshaler{AllowUnknownFields: true}
|
||||||
|
return unmarshaler.UnmarshalNext(d, p)
|
||||||
|
}
|
||||||
|
|
||||||
|
func decodeNonProtoField(d *json.Decoder, v interface{}) error {
|
||||||
|
rv := reflect.ValueOf(v)
|
||||||
|
if rv.Kind() != reflect.Ptr {
|
||||||
|
return fmt.Errorf("%T is not a pointer", v)
|
||||||
|
}
|
||||||
|
for rv.Kind() == reflect.Ptr {
|
||||||
|
if rv.IsNil() {
|
||||||
|
rv.Set(reflect.New(rv.Type().Elem()))
|
||||||
|
}
|
||||||
|
if rv.Type().ConvertibleTo(typeProtoMessage) {
|
||||||
|
unmarshaler := &jsonpb.Unmarshaler{AllowUnknownFields: true}
|
||||||
|
return unmarshaler.UnmarshalNext(d, rv.Interface().(proto.Message))
|
||||||
|
}
|
||||||
|
rv = rv.Elem()
|
||||||
|
}
|
||||||
|
if rv.Kind() == reflect.Map {
|
||||||
|
if rv.IsNil() {
|
||||||
|
rv.Set(reflect.MakeMap(rv.Type()))
|
||||||
|
}
|
||||||
|
conv, ok := convFromType[rv.Type().Key().Kind()]
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("unsupported type of map field key: %v", rv.Type().Key())
|
||||||
|
}
|
||||||
|
|
||||||
|
m := make(map[string]*json.RawMessage)
|
||||||
|
if err := d.Decode(&m); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
for k, v := range m {
|
||||||
|
result := conv.Call([]reflect.Value{reflect.ValueOf(k)})
|
||||||
|
if err := result[1].Interface(); err != nil {
|
||||||
|
return err.(error)
|
||||||
|
}
|
||||||
|
bk := result[0]
|
||||||
|
bv := reflect.New(rv.Type().Elem())
|
||||||
|
if err := unmarshalJSONPb([]byte(*v), bv.Interface()); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
rv.SetMapIndex(bk, bv.Elem())
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
if _, ok := rv.Interface().(protoEnum); ok {
|
||||||
|
var repr interface{}
|
||||||
|
if err := d.Decode(&repr); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
switch repr.(type) {
|
||||||
|
case string:
|
||||||
|
// TODO(yugui) Should use proto.StructProperties?
|
||||||
|
return fmt.Errorf("unmarshaling of symbolic enum %q not supported: %T", repr, rv.Interface())
|
||||||
|
case float64:
|
||||||
|
rv.Set(reflect.ValueOf(int32(repr.(float64))).Convert(rv.Type()))
|
||||||
|
return nil
|
||||||
|
default:
|
||||||
|
return fmt.Errorf("cannot assign %#v into Go type %T", repr, rv.Interface())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return d.Decode(v)
|
||||||
|
}
|
||||||
|
|
||||||
|
type protoEnum interface {
|
||||||
|
fmt.Stringer
|
||||||
|
EnumDescriptor() ([]byte, []int)
|
||||||
|
}
|
||||||
|
|
||||||
|
var typeProtoMessage = reflect.TypeOf((*proto.Message)(nil)).Elem()
|
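A sketch (not part of the vendored file) of configuring the JSONPb marshaler; only the option fields referenced in this file (OrigName, EnumsAsInts, Indent) are used, and structpb.Value appears here only because it is vendored in the same commit:

package main

import (
	"fmt"

	structpb "github.com/golang/protobuf/ptypes/struct"
	"github.com/grpc-ecosystem/grpc-gateway/runtime"
)

func main() {
	m := &runtime.JSONPb{OrigName: true, EnumsAsInts: false, Indent: "  "}

	// Any proto.Message can be marshaled through the jsonpb-backed path.
	v := &structpb.Value{Kind: &structpb.Value_NumberValue{NumberValue: 42}}
	buf, err := m.Marshal(v)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(buf), m.ContentType())
}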
42
vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler.go
generated
vendored
Normal file
|
@ -0,0 +1,42 @@
package runtime

import (
	"io"
)

// Marshaler defines a conversion between byte sequence and gRPC payloads / fields.
type Marshaler interface {
	// Marshal marshals "v" into byte sequence.
	Marshal(v interface{}) ([]byte, error)
	// Unmarshal unmarshals "data" into "v".
	// "v" must be a pointer value.
	Unmarshal(data []byte, v interface{}) error
	// NewDecoder returns a Decoder which reads byte sequence from "r".
	NewDecoder(r io.Reader) Decoder
	// NewEncoder returns an Encoder which writes byte sequence into "w".
	NewEncoder(w io.Writer) Encoder
	// ContentType returns the Content-Type which this marshaler is responsible for.
	ContentType() string
}

// Decoder decodes a byte sequence.
type Decoder interface {
	Decode(v interface{}) error
}

// Encoder encodes gRPC payloads / fields into byte sequence.
type Encoder interface {
	Encode(v interface{}) error
}

// DecoderFunc adapts a decoder function into Decoder.
type DecoderFunc func(v interface{}) error

// Decode delegates invocations to the underlying function itself.
func (f DecoderFunc) Decode(v interface{}) error { return f(v) }

// EncoderFunc adapts an encoder function into Encoder.
type EncoderFunc func(v interface{}) error

// Encode delegates invocations to the underlying function itself.
func (f EncoderFunc) Encode(v interface{}) error { return f(v) }
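A sketch (not part of the vendored file) of a custom Marshaler built on the Decoder/Encoder adapters defined above; the protoBinary name and the "application/octet-stream" content type are illustrative only:

package main

import (
	"io"
	"io/ioutil"

	"github.com/golang/protobuf/proto"
	"github.com/grpc-ecosystem/grpc-gateway/runtime"
)

// protoBinary marshals proto.Message values as their binary wire format.
type protoBinary struct{}

func (protoBinary) ContentType() string { return "application/octet-stream" }

func (protoBinary) Marshal(v interface{}) ([]byte, error) {
	return proto.Marshal(v.(proto.Message))
}

func (protoBinary) Unmarshal(data []byte, v interface{}) error {
	return proto.Unmarshal(data, v.(proto.Message))
}

func (protoBinary) NewDecoder(r io.Reader) runtime.Decoder {
	return runtime.DecoderFunc(func(v interface{}) error {
		buf, err := ioutil.ReadAll(r)
		if err != nil {
			return err
		}
		return proto.Unmarshal(buf, v.(proto.Message))
	})
}

func (protoBinary) NewEncoder(w io.Writer) runtime.Encoder {
	return runtime.EncoderFunc(func(v interface{}) error {
		buf, err := proto.Marshal(v.(proto.Message))
		if err != nil {
			return err
		}
		_, err = w.Write(buf)
		return err
	})
}

var _ runtime.Marshaler = protoBinary{}

func main() {}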
91
vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler_registry.go
generated
vendored
Normal file
|
@ -0,0 +1,91 @@
|
||||||
|
package runtime
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"net/http"
|
||||||
|
)
|
||||||
|
|
||||||
|
// MIMEWildcard is the fallback MIME type used for requests which do not match
|
||||||
|
// a registered MIME type.
|
||||||
|
const MIMEWildcard = "*"
|
||||||
|
|
||||||
|
var (
|
||||||
|
acceptHeader = http.CanonicalHeaderKey("Accept")
|
||||||
|
contentTypeHeader = http.CanonicalHeaderKey("Content-Type")
|
||||||
|
|
||||||
|
defaultMarshaler = &JSONPb{OrigName: true}
|
||||||
|
)
|
||||||
|
|
||||||
|
// MarshalerForRequest returns the inbound/outbound marshalers for this request.
|
||||||
|
// It checks the registry on the ServeMux for the MIME type set by the Content-Type header.
|
||||||
|
// If it isn't set (or the request Content-Type is empty), checks for "*".
|
||||||
|
// If there are multiple Content-Type headers set, choose the first one that it can
|
||||||
|
// exactly match in the registry.
|
||||||
|
// Otherwise, it follows the above logic for "*"/InboundMarshaler/OutboundMarshaler.
|
||||||
|
func MarshalerForRequest(mux *ServeMux, r *http.Request) (inbound Marshaler, outbound Marshaler) {
|
||||||
|
for _, acceptVal := range r.Header[acceptHeader] {
|
||||||
|
if m, ok := mux.marshalers.mimeMap[acceptVal]; ok {
|
||||||
|
outbound = m
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, contentTypeVal := range r.Header[contentTypeHeader] {
|
||||||
|
if m, ok := mux.marshalers.mimeMap[contentTypeVal]; ok {
|
||||||
|
inbound = m
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if inbound == nil {
|
||||||
|
inbound = mux.marshalers.mimeMap[MIMEWildcard]
|
||||||
|
}
|
||||||
|
if outbound == nil {
|
||||||
|
outbound = inbound
|
||||||
|
}
|
||||||
|
|
||||||
|
return inbound, outbound
|
||||||
|
}
|
||||||
|
|
||||||
|
// marshalerRegistry is a mapping from MIME types to Marshalers.
|
||||||
|
type marshalerRegistry struct {
|
||||||
|
mimeMap map[string]Marshaler
|
||||||
|
}
|
||||||
|
|
||||||
|
// add adds a marshaler for a case-sensitive MIME type string ("*" to match any
|
||||||
|
// MIME type).
|
||||||
|
func (m marshalerRegistry) add(mime string, marshaler Marshaler) error {
|
||||||
|
if len(mime) == 0 {
|
||||||
|
return errors.New("empty MIME type")
|
||||||
|
}
|
||||||
|
|
||||||
|
m.mimeMap[mime] = marshaler
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// makeMarshalerMIMERegistry returns a new registry of marshalers.
|
||||||
|
// It allows for a mapping of case-sensitive Content-Type MIME type string to runtime.Marshaler interfaces.
|
||||||
|
//
|
||||||
|
// For example, you could allow the client to specify the use of the runtime.JSONPb marshaler
|
||||||
|
// with a "applicaton/jsonpb" Content-Type and the use of the runtime.JSONBuiltin marshaler
|
||||||
|
// with a "application/json" Content-Type.
|
||||||
|
// "*" can be used to match any Content-Type.
|
||||||
|
// This can be attached to a ServerMux with the marshaler option.
|
||||||
|
func makeMarshalerMIMERegistry() marshalerRegistry {
|
||||||
|
return marshalerRegistry{
|
||||||
|
mimeMap: map[string]Marshaler{
|
||||||
|
MIMEWildcard: defaultMarshaler,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithMarshalerOption returns a ServeMuxOption which associates inbound and outbound
|
||||||
|
// Marshalers to a MIME type in mux.
|
||||||
|
func WithMarshalerOption(mime string, marshaler Marshaler) ServeMuxOption {
|
||||||
|
return func(mux *ServeMux) {
|
||||||
|
if err := mux.marshalers.add(mime, marshaler); err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
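A sketch (not part of the vendored file) showing how WithMarshalerOption registers per-MIME-type marshalers and how MarshalerForRequest resolves them; the request path and headers are illustrative:

package main

import (
	"fmt"
	"net/http"

	"github.com/grpc-ecosystem/grpc-gateway/runtime"
)

func main() {
	mux := runtime.NewServeMux(
		runtime.WithMarshalerOption("application/json", &runtime.JSONBuiltin{}),
		// Anything without an exact match falls back to the wildcard entry.
		runtime.WithMarshalerOption(runtime.MIMEWildcard, &runtime.JSONPb{OrigName: true}),
	)

	req, _ := http.NewRequest("POST", "/v1/query", nil)
	req.Header.Set("Content-Type", "application/json")
	inbound, outbound := runtime.MarshalerForRequest(mux, req)
	fmt.Printf("%T %T\n", inbound, outbound) // with no Accept header, outbound defaults to inbound
}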
132
vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/mux.go
generated
vendored
Normal file
|
@ -0,0 +1,132 @@
|
||||||
|
package runtime
|
||||||
|
|
||||||
|
import (
|
||||||
|
"net/http"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"golang.org/x/net/context"
|
||||||
|
|
||||||
|
"github.com/golang/protobuf/proto"
|
||||||
|
)
|
||||||
|
|
||||||
|
// A HandlerFunc handles a specific pair of path pattern and HTTP method.
type HandlerFunc func(w http.ResponseWriter, r *http.Request, pathParams map[string]string)

// ServeMux is a request multiplexer for grpc-gateway.
// It matches http requests to patterns and invokes the corresponding handler.
type ServeMux struct {
	// handlers maps HTTP method to a list of handlers.
	handlers               map[string][]handler
	forwardResponseOptions []func(context.Context, http.ResponseWriter, proto.Message) error
	marshalers             marshalerRegistry
}

// ServeMuxOption is an option that can be given to a ServeMux on construction.
type ServeMuxOption func(*ServeMux)

// WithForwardResponseOption returns a ServeMuxOption representing the forwardResponseOption.
//
// forwardResponseOption is an option that will be called on the relevant context.Context,
// http.ResponseWriter, and proto.Message before every forwarded response.
//
// The message may be nil in the case where just a header is being sent.
func WithForwardResponseOption(forwardResponseOption func(context.Context, http.ResponseWriter, proto.Message) error) ServeMuxOption {
	return func(serveMux *ServeMux) {
		serveMux.forwardResponseOptions = append(serveMux.forwardResponseOptions, forwardResponseOption)
	}
}

// NewServeMux returns a new ServeMux whose internal mapping is empty.
func NewServeMux(opts ...ServeMuxOption) *ServeMux {
	serveMux := &ServeMux{
		handlers:               make(map[string][]handler),
		forwardResponseOptions: make([]func(context.Context, http.ResponseWriter, proto.Message) error, 0),
		marshalers:             makeMarshalerMIMERegistry(),
	}

	for _, opt := range opts {
		opt(serveMux)
	}
	return serveMux
}

// Handle associates "h" to the pair of HTTP method and path pattern.
func (s *ServeMux) Handle(meth string, pat Pattern, h HandlerFunc) {
	s.handlers[meth] = append(s.handlers[meth], handler{pat: pat, h: h})
}

// ServeHTTP dispatches the request to the first handler whose pattern matches to r.Method and r.Path.
func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	path := r.URL.Path
	if !strings.HasPrefix(path, "/") {
		OtherErrorHandler(w, r, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)
		return
	}

	components := strings.Split(path[1:], "/")
	l := len(components)
	var verb string
	if idx := strings.LastIndex(components[l-1], ":"); idx == 0 {
		OtherErrorHandler(w, r, http.StatusText(http.StatusNotFound), http.StatusNotFound)
		return
	} else if idx > 0 {
		c := components[l-1]
		components[l-1], verb = c[:idx], c[idx+1:]
	}

	if override := r.Header.Get("X-HTTP-Method-Override"); override != "" && isPathLengthFallback(r) {
		r.Method = strings.ToUpper(override)
		if err := r.ParseForm(); err != nil {
			OtherErrorHandler(w, r, err.Error(), http.StatusBadRequest)
			return
		}
	}
	for _, h := range s.handlers[r.Method] {
		pathParams, err := h.pat.Match(components, verb)
		if err != nil {
			continue
		}
		h.h(w, r, pathParams)
		return
	}

	// lookup other methods to handle fallback from GET to POST and
	// to determine if it is MethodNotAllowed or NotFound.
	for m, handlers := range s.handlers {
		if m == r.Method {
			continue
		}
		for _, h := range handlers {
			pathParams, err := h.pat.Match(components, verb)
			if err != nil {
				continue
			}
			// X-HTTP-Method-Override is optional. Always allow fallback to POST.
			if isPathLengthFallback(r) {
				if err := r.ParseForm(); err != nil {
					OtherErrorHandler(w, r, err.Error(), http.StatusBadRequest)
					return
				}
				h.h(w, r, pathParams)
				return
			}
			OtherErrorHandler(w, r, http.StatusText(http.StatusMethodNotAllowed), http.StatusMethodNotAllowed)
			return
		}
	}
	OtherErrorHandler(w, r, http.StatusText(http.StatusNotFound), http.StatusNotFound)
}

// GetForwardResponseOptions returns the ForwardResponseOptions associated with this ServeMux.
func (s *ServeMux) GetForwardResponseOptions() []func(context.Context, http.ResponseWriter, proto.Message) error {
	return s.forwardResponseOptions
}

func isPathLengthFallback(r *http.Request) bool {
	return r.Method == "POST" && r.Header.Get("Content-Type") == "application/x-www-form-urlencoded"
}

type handler struct {
	pat Pattern
	h   HandlerFunc
}
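For orientation, this is roughly how generated gateway code drives the multiplexer above: one Pattern is registered per HTTP rule, and the mux is then served like any http.Handler. The /healthz route, handler body, and listen address in this sketch are invented for illustration and are not part of this change.

package main

import (
	"io"
	"log"
	"net/http"

	"github.com/grpc-ecosystem/grpc-gateway/runtime"
	"github.com/grpc-ecosystem/grpc-gateway/utilities"
)

func main() {
	mux := runtime.NewServeMux()

	// A pattern matching the literal path "/healthz" (pool index 0), with no verb suffix.
	healthz := runtime.MustPattern(runtime.NewPattern(1,
		[]int{int(utilities.OpLitPush), 0},
		[]string{"healthz"}, ""))

	mux.Handle("GET", healthz, func(w http.ResponseWriter, r *http.Request, pathParams map[string]string) {
		io.WriteString(w, "ok\n")
	})

	log.Fatal(http.ListenAndServe(":8080", mux))
}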
227 vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/pattern.go generated vendored Normal file
@@ -0,0 +1,227 @@
package runtime

import (
	"errors"
	"fmt"
	"strings"

	"github.com/grpc-ecosystem/grpc-gateway/utilities"
	"google.golang.org/grpc/grpclog"
)

var (
	// ErrNotMatch indicates that the given HTTP request path does not match to the pattern.
	ErrNotMatch = errors.New("not match to the path pattern")
	// ErrInvalidPattern indicates that the given definition of Pattern is not valid.
	ErrInvalidPattern = errors.New("invalid pattern")
)

type op struct {
	code    utilities.OpCode
	operand int
}

// Pattern is a template pattern of http request paths defined in github.com/googleapis/googleapis/google/api/http.proto.
type Pattern struct {
	// ops is a list of operations
	ops []op
	// pool is a constant pool indexed by the operands or vars.
	pool []string
	// vars is a list of variables names to be bound by this pattern
	vars []string
	// stacksize is the max depth of the stack
	stacksize int
	// tailLen is the length of the fixed-size segments after a deep wildcard
	tailLen int
	// verb is the VERB part of the path pattern. It is empty if the pattern does not have VERB part.
	verb string
}

// NewPattern returns a new Pattern from the given definition values.
// "ops" is a sequence of op codes. "pool" is a constant pool.
// "verb" is the verb part of the pattern. It is empty if the pattern does not have the part.
// "version" must be 1 for now.
// It returns an error if the given definition is invalid.
func NewPattern(version int, ops []int, pool []string, verb string) (Pattern, error) {
	if version != 1 {
		grpclog.Printf("unsupported version: %d", version)
		return Pattern{}, ErrInvalidPattern
	}

	l := len(ops)
	if l%2 != 0 {
		grpclog.Printf("odd number of ops codes: %d", l)
		return Pattern{}, ErrInvalidPattern
	}

	var (
		typedOps        []op
		stack, maxstack int
		tailLen         int
		pushMSeen       bool
		vars            []string
	)
	for i := 0; i < l; i += 2 {
		op := op{code: utilities.OpCode(ops[i]), operand: ops[i+1]}
		switch op.code {
		case utilities.OpNop:
			continue
		case utilities.OpPush:
			if pushMSeen {
				tailLen++
			}
			stack++
		case utilities.OpPushM:
			if pushMSeen {
				grpclog.Printf("pushM appears twice")
				return Pattern{}, ErrInvalidPattern
			}
			pushMSeen = true
			stack++
		case utilities.OpLitPush:
			if op.operand < 0 || len(pool) <= op.operand {
				grpclog.Printf("negative literal index: %d", op.operand)
				return Pattern{}, ErrInvalidPattern
			}
			if pushMSeen {
				tailLen++
			}
			stack++
		case utilities.OpConcatN:
			if op.operand <= 0 {
				grpclog.Printf("negative concat size: %d", op.operand)
				return Pattern{}, ErrInvalidPattern
			}
			stack -= op.operand
			if stack < 0 {
				grpclog.Print("stack underflow")
				return Pattern{}, ErrInvalidPattern
			}
			stack++
		case utilities.OpCapture:
			if op.operand < 0 || len(pool) <= op.operand {
				grpclog.Printf("variable name index out of bound: %d", op.operand)
				return Pattern{}, ErrInvalidPattern
			}
			v := pool[op.operand]
			op.operand = len(vars)
			vars = append(vars, v)
			stack--
			if stack < 0 {
				grpclog.Printf("stack underflow")
				return Pattern{}, ErrInvalidPattern
			}
		default:
			grpclog.Printf("invalid opcode: %d", op.code)
			return Pattern{}, ErrInvalidPattern
		}

		if maxstack < stack {
			maxstack = stack
		}
		typedOps = append(typedOps, op)
	}
	return Pattern{
		ops:       typedOps,
		pool:      pool,
		vars:      vars,
		stacksize: maxstack,
		tailLen:   tailLen,
		verb:      verb,
	}, nil
}

// MustPattern is a helper function which makes it easier to call NewPattern in variable initialization.
func MustPattern(p Pattern, err error) Pattern {
	if err != nil {
		grpclog.Fatalf("Pattern initialization failed: %v", err)
	}
	return p
}

// Match examines components if it matches to the Pattern.
// If it matches, the function returns a mapping from field paths to their captured values.
// If otherwise, the function returns an error.
func (p Pattern) Match(components []string, verb string) (map[string]string, error) {
	if p.verb != verb {
		return nil, ErrNotMatch
	}

	var pos int
	stack := make([]string, 0, p.stacksize)
	captured := make([]string, len(p.vars))
	l := len(components)
	for _, op := range p.ops {
		switch op.code {
		case utilities.OpNop:
			continue
		case utilities.OpPush, utilities.OpLitPush:
			if pos >= l {
				return nil, ErrNotMatch
			}
			c := components[pos]
			if op.code == utilities.OpLitPush {
				if lit := p.pool[op.operand]; c != lit {
					return nil, ErrNotMatch
				}
			}
			stack = append(stack, c)
			pos++
		case utilities.OpPushM:
			end := len(components)
			if end < pos+p.tailLen {
				return nil, ErrNotMatch
			}
			end -= p.tailLen
			stack = append(stack, strings.Join(components[pos:end], "/"))
			pos = end
		case utilities.OpConcatN:
			n := op.operand
			l := len(stack) - n
			stack = append(stack[:l], strings.Join(stack[l:], "/"))
		case utilities.OpCapture:
			n := len(stack) - 1
			captured[op.operand] = stack[n]
			stack = stack[:n]
		}
	}
	if pos < l {
		return nil, ErrNotMatch
	}
	bindings := make(map[string]string)
	for i, val := range captured {
		bindings[p.vars[i]] = val
	}
	return bindings, nil
}

// Verb returns the verb part of the Pattern.
func (p Pattern) Verb() string { return p.verb }

func (p Pattern) String() string {
	var stack []string
	for _, op := range p.ops {
		switch op.code {
		case utilities.OpNop:
			continue
		case utilities.OpPush:
			stack = append(stack, "*")
		case utilities.OpLitPush:
			stack = append(stack, p.pool[op.operand])
		case utilities.OpPushM:
			stack = append(stack, "**")
		case utilities.OpConcatN:
			n := op.operand
			l := len(stack) - n
			stack = append(stack[:l], strings.Join(stack[l:], "/"))
		case utilities.OpCapture:
			n := len(stack) - 1
			stack[n] = fmt.Sprintf("{%s=%s}", p.vars[op.operand], stack[n])
		}
	}
	segs := strings.Join(stack, "/")
	if p.verb != "" {
		return fmt.Sprintf("/%s:%s", segs, p.verb)
	}
	return "/" + segs
}
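As a hedged illustration of the opcode format that NewPattern accepts (in practice the op sequence comes from protoc-gen-grpc-gateway, not hand-written code), the template /v1/users/{name} could be compiled and matched like this; the names and values are invented:

package main

import (
	"fmt"

	"github.com/grpc-ecosystem/grpc-gateway/runtime"
	"github.com/grpc-ecosystem/grpc-gateway/utilities"
)

func main() {
	pat := runtime.MustPattern(runtime.NewPattern(1,
		[]int{
			int(utilities.OpLitPush), 0, // literal "v1"
			int(utilities.OpLitPush), 1, // literal "users"
			int(utilities.OpPush), 0,    // one wildcard path segment
			int(utilities.OpConcatN), 1, // collapse it into a single value
			int(utilities.OpCapture), 2, // bind that value to the variable "name"
		},
		[]string{"v1", "users", "name"}, ""))

	fmt.Println(pat.String()) // /v1/users/{name=*}

	bindings, err := pat.Match([]string{"v1", "users", "alice"}, "")
	fmt.Println(bindings, err) // map[name:alice] <nil>
}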
80 vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto2_convert.go generated vendored Normal file
@@ -0,0 +1,80 @@
package runtime

import (
	"github.com/golang/protobuf/proto"
)

// StringP returns a pointer to a string whose pointee is same as the given string value.
func StringP(val string) (*string, error) {
	return proto.String(val), nil
}

// BoolP parses the given string representation of a boolean value,
// and returns a pointer to a bool whose value is same as the parsed value.
func BoolP(val string) (*bool, error) {
	b, err := Bool(val)
	if err != nil {
		return nil, err
	}
	return proto.Bool(b), nil
}

// Float64P parses the given string representation of a floating point number,
// and returns a pointer to a float64 whose value is same as the parsed number.
func Float64P(val string) (*float64, error) {
	f, err := Float64(val)
	if err != nil {
		return nil, err
	}
	return proto.Float64(f), nil
}

// Float32P parses the given string representation of a floating point number,
// and returns a pointer to a float32 whose value is same as the parsed number.
func Float32P(val string) (*float32, error) {
	f, err := Float32(val)
	if err != nil {
		return nil, err
	}
	return proto.Float32(f), nil
}

// Int64P parses the given string representation of an integer
// and returns a pointer to a int64 whose value is same as the parsed integer.
func Int64P(val string) (*int64, error) {
	i, err := Int64(val)
	if err != nil {
		return nil, err
	}
	return proto.Int64(i), nil
}

// Int32P parses the given string representation of an integer
// and returns a pointer to a int32 whose value is same as the parsed integer.
func Int32P(val string) (*int32, error) {
	i, err := Int32(val)
	if err != nil {
		return nil, err
	}
	return proto.Int32(i), err
}

// Uint64P parses the given string representation of an integer
// and returns a pointer to a uint64 whose value is same as the parsed integer.
func Uint64P(val string) (*uint64, error) {
	i, err := Uint64(val)
	if err != nil {
		return nil, err
	}
	return proto.Uint64(i), err
}

// Uint32P parses the given string representation of an integer
// and returns a pointer to a uint32 whose value is same as the parsed integer.
func Uint32P(val string) (*uint32, error) {
	i, err := Uint32(val)
	if err != nil {
		return nil, err
	}
	return proto.Uint32(i), err
}
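These helpers exist so that proto2 optional fields, which are pointers in the generated Go structs, can be filled from plain string values taken from the path or query string. A minimal sketch with invented values:

package main

import (
	"fmt"

	"github.com/grpc-ecosystem/grpc-gateway/runtime"
)

func main() {
	name, _ := runtime.StringP("alice") // *string pointing at "alice"
	age, err := runtime.Int32P("42")    // *int32 pointing at 42, or a parse error
	if err != nil {
		panic(err)
	}
	fmt.Println(*name, *age) // alice 42
}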
237 vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/query.go generated vendored Normal file
@@ -0,0 +1,237 @@
|
||||||
|
package runtime
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"net/url"
|
||||||
|
"reflect"
|
||||||
|
"strconv"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/golang/protobuf/proto"
|
||||||
|
"github.com/grpc-ecosystem/grpc-gateway/utilities"
|
||||||
|
"google.golang.org/grpc/grpclog"
|
||||||
|
)
|
||||||
|
|
||||||
|
// PopulateQueryParameters populates "values" into "msg".
|
||||||
|
// A value is ignored if its key starts with one of the elements in "filter".
|
||||||
|
func PopulateQueryParameters(msg proto.Message, values url.Values, filter *utilities.DoubleArray) error {
|
||||||
|
for key, values := range values {
|
||||||
|
fieldPath := strings.Split(key, ".")
|
||||||
|
if filter.HasCommonPrefix(fieldPath) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if err := populateFieldValueFromPath(msg, fieldPath, values); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// PopulateFieldFromPath sets a value in a nested Protobuf structure.
|
||||||
|
// It instantiates missing protobuf fields as it goes.
|
||||||
|
func PopulateFieldFromPath(msg proto.Message, fieldPathString string, value string) error {
|
||||||
|
fieldPath := strings.Split(fieldPathString, ".")
|
||||||
|
return populateFieldValueFromPath(msg, fieldPath, []string{value})
|
||||||
|
}
|
||||||
|
|
||||||
|
func populateFieldValueFromPath(msg proto.Message, fieldPath []string, values []string) error {
|
||||||
|
m := reflect.ValueOf(msg)
|
||||||
|
if m.Kind() != reflect.Ptr {
|
||||||
|
return fmt.Errorf("unexpected type %T: %v", msg, msg)
|
||||||
|
}
|
||||||
|
var props *proto.Properties
|
||||||
|
m = m.Elem()
|
||||||
|
for i, fieldName := range fieldPath {
|
||||||
|
isLast := i == len(fieldPath)-1
|
||||||
|
if !isLast && m.Kind() != reflect.Struct {
|
||||||
|
return fmt.Errorf("non-aggregate type in the mid of path: %s", strings.Join(fieldPath, "."))
|
||||||
|
}
|
||||||
|
var f reflect.Value
|
||||||
|
var err error
|
||||||
|
f, props, err = fieldByProtoName(m, fieldName)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
} else if !f.IsValid() {
|
||||||
|
grpclog.Printf("field not found in %T: %s", msg, strings.Join(fieldPath, "."))
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
switch f.Kind() {
|
||||||
|
case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, reflect.String, reflect.Uint32, reflect.Uint64:
|
||||||
|
if !isLast {
|
||||||
|
return fmt.Errorf("unexpected nested field %s in %s", fieldPath[i+1], strings.Join(fieldPath[:i+1], "."))
|
||||||
|
}
|
||||||
|
m = f
|
||||||
|
case reflect.Slice:
|
||||||
|
// TODO(yugui) Support []byte
|
||||||
|
if !isLast {
|
||||||
|
return fmt.Errorf("unexpected repeated field in %s", strings.Join(fieldPath, "."))
|
||||||
|
}
|
||||||
|
return populateRepeatedField(f, values, props)
|
||||||
|
case reflect.Ptr:
|
||||||
|
if f.IsNil() {
|
||||||
|
m = reflect.New(f.Type().Elem())
|
||||||
|
f.Set(m.Convert(f.Type()))
|
||||||
|
}
|
||||||
|
m = f.Elem()
|
||||||
|
continue
|
||||||
|
case reflect.Struct:
|
||||||
|
m = f
|
||||||
|
continue
|
||||||
|
default:
|
||||||
|
return fmt.Errorf("unexpected type %s in %T", f.Type(), msg)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
switch len(values) {
|
||||||
|
case 0:
|
||||||
|
return fmt.Errorf("no value of field: %s", strings.Join(fieldPath, "."))
|
||||||
|
case 1:
|
||||||
|
default:
|
||||||
|
grpclog.Printf("too many field values: %s", strings.Join(fieldPath, "."))
|
||||||
|
}
|
||||||
|
return populateField(m, values[0], props)
|
||||||
|
}
|
||||||
|
|
||||||
|
// fieldByProtoName looks up a field whose corresponding protobuf field name is "name".
|
||||||
|
// "m" must be a struct value. It returns zero reflect.Value if no such field found.
|
||||||
|
func fieldByProtoName(m reflect.Value, name string) (reflect.Value, *proto.Properties, error) {
|
||||||
|
props := proto.GetProperties(m.Type())
|
||||||
|
|
||||||
|
// look up field name in oneof map
|
||||||
|
if op, ok := props.OneofTypes[name]; ok {
|
||||||
|
v := reflect.New(op.Type.Elem())
|
||||||
|
field := m.Field(op.Field)
|
||||||
|
if !field.IsNil() {
|
||||||
|
return reflect.Value{}, nil, fmt.Errorf("field already set for %s oneof", props.Prop[op.Field].OrigName)
|
||||||
|
}
|
||||||
|
field.Set(v)
|
||||||
|
return v.Elem().Field(0), op.Prop, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, p := range props.Prop {
|
||||||
|
if p.OrigName == name {
|
||||||
|
return m.FieldByName(p.Name), p, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return reflect.Value{}, nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func populateRepeatedField(f reflect.Value, values []string, props *proto.Properties) error {
|
||||||
|
elemType := f.Type().Elem()
|
||||||
|
|
||||||
|
// is the destination field a slice of an enumeration type?
|
||||||
|
if enumValMap := proto.EnumValueMap(props.Enum); enumValMap != nil {
|
||||||
|
return populateFieldEnumRepeated(f, values, enumValMap)
|
||||||
|
}
|
||||||
|
|
||||||
|
conv, ok := convFromType[elemType.Kind()]
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("unsupported field type %s", elemType)
|
||||||
|
}
|
||||||
|
f.Set(reflect.MakeSlice(f.Type(), len(values), len(values)).Convert(f.Type()))
|
||||||
|
for i, v := range values {
|
||||||
|
result := conv.Call([]reflect.Value{reflect.ValueOf(v)})
|
||||||
|
if err := result[1].Interface(); err != nil {
|
||||||
|
return err.(error)
|
||||||
|
}
|
||||||
|
f.Index(i).Set(result[0].Convert(f.Index(i).Type()))
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func populateField(f reflect.Value, value string, props *proto.Properties) error {
|
||||||
|
// Handle well known type
|
||||||
|
type wkt interface {
|
||||||
|
XXX_WellKnownType() string
|
||||||
|
}
|
||||||
|
if wkt, ok := f.Addr().Interface().(wkt); ok {
|
||||||
|
switch wkt.XXX_WellKnownType() {
|
||||||
|
case "Timestamp":
|
||||||
|
if value == "null" {
|
||||||
|
f.Field(0).SetInt(0)
|
||||||
|
f.Field(1).SetInt(0)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
t, err := time.Parse(time.RFC3339Nano, value)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("bad Timestamp: %v", err)
|
||||||
|
}
|
||||||
|
f.Field(0).SetInt(int64(t.Unix()))
|
||||||
|
f.Field(1).SetInt(int64(t.Nanosecond()))
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// is the destination field an enumeration type?
|
||||||
|
if enumValMap := proto.EnumValueMap(props.Enum); enumValMap != nil {
|
||||||
|
return populateFieldEnum(f, value, enumValMap)
|
||||||
|
}
|
||||||
|
|
||||||
|
conv, ok := convFromType[f.Kind()]
|
||||||
|
if !ok {
|
||||||
|
return fmt.Errorf("unsupported field type %T", f)
|
||||||
|
}
|
||||||
|
result := conv.Call([]reflect.Value{reflect.ValueOf(value)})
|
||||||
|
if err := result[1].Interface(); err != nil {
|
||||||
|
return err.(error)
|
||||||
|
}
|
||||||
|
f.Set(result[0].Convert(f.Type()))
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func convertEnum(value string, t reflect.Type, enumValMap map[string]int32) (reflect.Value, error) {
|
||||||
|
// see if it's an enumeration string
|
||||||
|
if enumVal, ok := enumValMap[value]; ok {
|
||||||
|
return reflect.ValueOf(enumVal).Convert(t), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// check for an integer that matches an enumeration value
|
||||||
|
eVal, err := strconv.Atoi(value)
|
||||||
|
if err != nil {
|
||||||
|
return reflect.Value{}, fmt.Errorf("%s is not a valid %s", value, t)
|
||||||
|
}
|
||||||
|
for _, v := range enumValMap {
|
||||||
|
if v == int32(eVal) {
|
||||||
|
return reflect.ValueOf(eVal).Convert(t), nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return reflect.Value{}, fmt.Errorf("%s is not a valid %s", value, t)
|
||||||
|
}
|
||||||
|
|
||||||
|
func populateFieldEnum(f reflect.Value, value string, enumValMap map[string]int32) error {
|
||||||
|
cval, err := convertEnum(value, f.Type(), enumValMap)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
f.Set(cval)
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func populateFieldEnumRepeated(f reflect.Value, values []string, enumValMap map[string]int32) error {
|
||||||
|
elemType := f.Type().Elem()
|
||||||
|
f.Set(reflect.MakeSlice(f.Type(), len(values), len(values)).Convert(f.Type()))
|
||||||
|
for i, v := range values {
|
||||||
|
result, err := convertEnum(v, elemType, enumValMap)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
f.Index(i).Set(result)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
convFromType = map[reflect.Kind]reflect.Value{
|
||||||
|
reflect.String: reflect.ValueOf(String),
|
||||||
|
reflect.Bool: reflect.ValueOf(Bool),
|
||||||
|
reflect.Float64: reflect.ValueOf(Float64),
|
||||||
|
reflect.Float32: reflect.ValueOf(Float32),
|
||||||
|
reflect.Int64: reflect.ValueOf(Int64),
|
||||||
|
reflect.Int32: reflect.ValueOf(Int32),
|
||||||
|
reflect.Uint64: reflect.ValueOf(Uint64),
|
||||||
|
reflect.Uint32: reflect.ValueOf(Uint32),
|
||||||
|
// TODO(yugui) Support []byte
|
||||||
|
}
|
||||||
|
)
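A hedged sketch of the call shape of PopulateQueryParameters. Real callers pass protoc-generated request messages; the hand-written EchoRequest below only stands in for one, assuming the old github.com/golang/protobuf property reflection used above accepts its struct tag:

package main

import (
	"fmt"
	"log"
	"net/url"

	"github.com/grpc-ecosystem/grpc-gateway/runtime"
	"github.com/grpc-ecosystem/grpc-gateway/utilities"
)

// EchoRequest is a hypothetical stand-in for a generated message.
type EchoRequest struct {
	Name string `protobuf:"bytes,1,opt,name=name"`
}

func (m *EchoRequest) Reset()         { *m = EchoRequest{} }
func (m *EchoRequest) String() string { return m.Name }
func (*EchoRequest) ProtoMessage()    {}

func main() {
	var req EchoRequest
	values := url.Values{"name": {"alice"}}
	filter := utilities.NewDoubleArray(nil) // empty filter: no key is skipped
	if err := runtime.PopulateQueryParameters(&req, values, filter); err != nil {
		log.Fatal(err)
	}
	fmt.Println(req.Name) // alice
}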
2 vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/doc.go generated vendored Normal file
@@ -0,0 +1,2 @@
// Package utilities provides members for internal use in grpc-gateway.
package utilities
22 vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/pattern.go generated vendored Normal file
@@ -0,0 +1,22 @@
package utilities

// An OpCode is an opcode of compiled path patterns.
type OpCode int

// These constants are the valid values of OpCode.
const (
	// OpNop does nothing
	OpNop = OpCode(iota)
	// OpPush pushes a component to stack
	OpPush
	// OpLitPush pushes a component to stack if it matches to the literal
	OpLitPush
	// OpPushM concatenates the remaining components and pushes it to stack
	OpPushM
	// OpConcatN pops N items from stack, concatenates them and pushes it back to stack
	OpConcatN
	// OpCapture pops an item and binds it to the variable
	OpCapture
	// OpEnd is the least positive invalid opcode.
	OpEnd
)
177 vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/trie.go generated vendored Normal file
@@ -0,0 +1,177 @@
|
||||||
|
package utilities
|
||||||
|
|
||||||
|
import (
|
||||||
|
"sort"
|
||||||
|
)
|
||||||
|
|
||||||
|
// DoubleArray is a Double Array implementation of trie on sequences of strings.
|
||||||
|
type DoubleArray struct {
|
||||||
|
// Encoding keeps an encoding from string to int
|
||||||
|
Encoding map[string]int
|
||||||
|
// Base is the base array of Double Array
|
||||||
|
Base []int
|
||||||
|
// Check is the check array of Double Array
|
||||||
|
Check []int
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewDoubleArray builds a DoubleArray from a set of sequences of strings.
|
||||||
|
func NewDoubleArray(seqs [][]string) *DoubleArray {
|
||||||
|
da := &DoubleArray{Encoding: make(map[string]int)}
|
||||||
|
if len(seqs) == 0 {
|
||||||
|
return da
|
||||||
|
}
|
||||||
|
|
||||||
|
encoded := registerTokens(da, seqs)
|
||||||
|
sort.Sort(byLex(encoded))
|
||||||
|
|
||||||
|
root := node{row: -1, col: -1, left: 0, right: len(encoded)}
|
||||||
|
addSeqs(da, encoded, 0, root)
|
||||||
|
|
||||||
|
for i := len(da.Base); i > 0; i-- {
|
||||||
|
if da.Check[i-1] != 0 {
|
||||||
|
da.Base = da.Base[:i]
|
||||||
|
da.Check = da.Check[:i]
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return da
|
||||||
|
}
|
||||||
|
|
||||||
|
func registerTokens(da *DoubleArray, seqs [][]string) [][]int {
|
||||||
|
var result [][]int
|
||||||
|
for _, seq := range seqs {
|
||||||
|
var encoded []int
|
||||||
|
for _, token := range seq {
|
||||||
|
if _, ok := da.Encoding[token]; !ok {
|
||||||
|
da.Encoding[token] = len(da.Encoding)
|
||||||
|
}
|
||||||
|
encoded = append(encoded, da.Encoding[token])
|
||||||
|
}
|
||||||
|
result = append(result, encoded)
|
||||||
|
}
|
||||||
|
for i := range result {
|
||||||
|
result[i] = append(result[i], len(da.Encoding))
|
||||||
|
}
|
||||||
|
return result
|
||||||
|
}
|
||||||
|
|
||||||
|
type node struct {
|
||||||
|
row, col int
|
||||||
|
left, right int
|
||||||
|
}
|
||||||
|
|
||||||
|
func (n node) value(seqs [][]int) int {
|
||||||
|
return seqs[n.row][n.col]
|
||||||
|
}
|
||||||
|
|
||||||
|
func (n node) children(seqs [][]int) []*node {
|
||||||
|
var result []*node
|
||||||
|
lastVal := int(-1)
|
||||||
|
last := new(node)
|
||||||
|
for i := n.left; i < n.right; i++ {
|
||||||
|
if lastVal == seqs[i][n.col+1] {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
last.right = i
|
||||||
|
last = &node{
|
||||||
|
row: i,
|
||||||
|
col: n.col + 1,
|
||||||
|
left: i,
|
||||||
|
}
|
||||||
|
result = append(result, last)
|
||||||
|
}
|
||||||
|
last.right = n.right
|
||||||
|
return result
|
||||||
|
}
|
||||||
|
|
||||||
|
func addSeqs(da *DoubleArray, seqs [][]int, pos int, n node) {
|
||||||
|
ensureSize(da, pos)
|
||||||
|
|
||||||
|
children := n.children(seqs)
|
||||||
|
var i int
|
||||||
|
for i = 1; ; i++ {
|
||||||
|
ok := func() bool {
|
||||||
|
for _, child := range children {
|
||||||
|
code := child.value(seqs)
|
||||||
|
j := i + code
|
||||||
|
ensureSize(da, j)
|
||||||
|
if da.Check[j] != 0 {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}()
|
||||||
|
if ok {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
da.Base[pos] = i
|
||||||
|
for _, child := range children {
|
||||||
|
code := child.value(seqs)
|
||||||
|
j := i + code
|
||||||
|
da.Check[j] = pos + 1
|
||||||
|
}
|
||||||
|
terminator := len(da.Encoding)
|
||||||
|
for _, child := range children {
|
||||||
|
code := child.value(seqs)
|
||||||
|
if code == terminator {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
j := i + code
|
||||||
|
addSeqs(da, seqs, j, *child)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func ensureSize(da *DoubleArray, i int) {
|
||||||
|
for i >= len(da.Base) {
|
||||||
|
da.Base = append(da.Base, make([]int, len(da.Base)+1)...)
|
||||||
|
da.Check = append(da.Check, make([]int, len(da.Check)+1)...)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type byLex [][]int
|
||||||
|
|
||||||
|
func (l byLex) Len() int { return len(l) }
|
||||||
|
func (l byLex) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
|
||||||
|
func (l byLex) Less(i, j int) bool {
|
||||||
|
si := l[i]
|
||||||
|
sj := l[j]
|
||||||
|
var k int
|
||||||
|
for k = 0; k < len(si) && k < len(sj); k++ {
|
||||||
|
if si[k] < sj[k] {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
if si[k] > sj[k] {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if k < len(sj) {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// HasCommonPrefix determines if any sequence in the DoubleArray is a prefix of the given sequence.
|
||||||
|
func (da *DoubleArray) HasCommonPrefix(seq []string) bool {
|
||||||
|
if len(da.Base) == 0 {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
var i int
|
||||||
|
for _, t := range seq {
|
||||||
|
code, ok := da.Encoding[t]
|
||||||
|
if !ok {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
j := da.Base[i] + code
|
||||||
|
if len(da.Check) <= j || da.Check[j] != i+1 {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
i = j
|
||||||
|
}
|
||||||
|
j := da.Base[i] + len(da.Encoding)
|
||||||
|
if len(da.Check) <= j || da.Check[j] != i+1 {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
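PopulateQueryParameters above takes one of these tries as its filter: any query key whose dotted field path has a stored sequence as a prefix is skipped. A small illustrative use, with invented sequences:

package main

import (
	"fmt"

	"github.com/grpc-ecosystem/grpc-gateway/utilities"
)

func main() {
	filter := utilities.NewDoubleArray([][]string{{"user", "name"}})

	fmt.Println(filter.HasCommonPrefix([]string{"user", "name", "first"})) // true: "user.name" is a stored prefix
	fmt.Println(filter.HasCommonPrefix([]string{"user"}))                  // false: no stored sequence is a prefix of "user"
}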
202 vendor/github.com/petermattis/goid/LICENSE generated vendored Normal file
@@ -0,0 +1,202 @@
|
||||||
|
Apache License
|
||||||
|
Version 2.0, January 2004
|
||||||
|
http://www.apache.org/licenses/
|
||||||
|
|
||||||
|
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||||
|
|
||||||
|
1. Definitions.
|
||||||
|
|
||||||
|
"License" shall mean the terms and conditions for use, reproduction,
|
||||||
|
and distribution as defined by Sections 1 through 9 of this document.
|
||||||
|
|
||||||
|
"Licensor" shall mean the copyright owner or entity authorized by
|
||||||
|
the copyright owner that is granting the License.
|
||||||
|
|
||||||
|
"Legal Entity" shall mean the union of the acting entity and all
|
||||||
|
other entities that control, are controlled by, or are under common
|
||||||
|
control with that entity. For the purposes of this definition,
|
||||||
|
"control" means (i) the power, direct or indirect, to cause the
|
||||||
|
direction or management of such entity, whether by contract or
|
||||||
|
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||||
|
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||||
|
|
||||||
|
"You" (or "Your") shall mean an individual or Legal Entity
|
||||||
|
exercising permissions granted by this License.
|
||||||
|
|
||||||
|
"Source" form shall mean the preferred form for making modifications,
|
||||||
|
including but not limited to software source code, documentation
|
||||||
|
source, and configuration files.
|
||||||
|
|
||||||
|
"Object" form shall mean any form resulting from mechanical
|
||||||
|
transformation or translation of a Source form, including but
|
||||||
|
not limited to compiled object code, generated documentation,
|
||||||
|
and conversions to other media types.
|
||||||
|
|
||||||
|
"Work" shall mean the work of authorship, whether in Source or
|
||||||
|
Object form, made available under the License, as indicated by a
|
||||||
|
copyright notice that is included in or attached to the work
|
||||||
|
(an example is provided in the Appendix below).
|
||||||
|
|
||||||
|
"Derivative Works" shall mean any work, whether in Source or Object
|
||||||
|
form, that is based on (or derived from) the Work and for which the
|
||||||
|
editorial revisions, annotations, elaborations, or other modifications
|
||||||
|
represent, as a whole, an original work of authorship. For the purposes
|
||||||
|
of this License, Derivative Works shall not include works that remain
|
||||||
|
separable from, or merely link (or bind by name) to the interfaces of,
|
||||||
|
the Work and Derivative Works thereof.
|
||||||
|
|
||||||
|
"Contribution" shall mean any work of authorship, including
|
||||||
|
the original version of the Work and any modifications or additions
|
||||||
|
to that Work or Derivative Works thereof, that is intentionally
|
||||||
|
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||||
|
or by an individual or Legal Entity authorized to submit on behalf of
|
||||||
|
the copyright owner. For the purposes of this definition, "submitted"
|
||||||
|
means any form of electronic, verbal, or written communication sent
|
||||||
|
to the Licensor or its representatives, including but not limited to
|
||||||
|
communication on electronic mailing lists, source code control systems,
|
||||||
|
and issue tracking systems that are managed by, or on behalf of, the
|
||||||
|
Licensor for the purpose of discussing and improving the Work, but
|
||||||
|
excluding communication that is conspicuously marked or otherwise
|
||||||
|
designated in writing by the copyright owner as "Not a Contribution."
|
||||||
|
|
||||||
|
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||||
|
on behalf of whom a Contribution has been received by Licensor and
|
||||||
|
subsequently incorporated within the Work.
|
||||||
|
|
||||||
|
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
copyright license to reproduce, prepare Derivative Works of,
|
||||||
|
publicly display, publicly perform, sublicense, and distribute the
|
||||||
|
Work and such Derivative Works in Source or Object form.
|
||||||
|
|
||||||
|
3. Grant of Patent License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
(except as stated in this section) patent license to make, have made,
|
||||||
|
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||||
|
where such license applies only to those patent claims licensable
|
||||||
|
by such Contributor that are necessarily infringed by their
|
||||||
|
Contribution(s) alone or by combination of their Contribution(s)
|
||||||
|
with the Work to which such Contribution(s) was submitted. If You
|
||||||
|
institute patent litigation against any entity (including a
|
||||||
|
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||||
|
or a Contribution incorporated within the Work constitutes direct
|
||||||
|
or contributory patent infringement, then any patent licenses
|
||||||
|
granted to You under this License for that Work shall terminate
|
||||||
|
as of the date such litigation is filed.
|
||||||
|
|
||||||
|
4. Redistribution. You may reproduce and distribute copies of the
|
||||||
|
Work or Derivative Works thereof in any medium, with or without
|
||||||
|
modifications, and in Source or Object form, provided that You
|
||||||
|
meet the following conditions:
|
||||||
|
|
||||||
|
(a) You must give any other recipients of the Work or
|
||||||
|
Derivative Works a copy of this License; and
|
||||||
|
|
||||||
|
(b) You must cause any modified files to carry prominent notices
|
||||||
|
stating that You changed the files; and
|
||||||
|
|
||||||
|
(c) You must retain, in the Source form of any Derivative Works
|
||||||
|
that You distribute, all copyright, patent, trademark, and
|
||||||
|
attribution notices from the Source form of the Work,
|
||||||
|
excluding those notices that do not pertain to any part of
|
||||||
|
the Derivative Works; and
|
||||||
|
|
||||||
|
(d) If the Work includes a "NOTICE" text file as part of its
|
||||||
|
distribution, then any Derivative Works that You distribute must
|
||||||
|
include a readable copy of the attribution notices contained
|
||||||
|
within such NOTICE file, excluding those notices that do not
|
||||||
|
pertain to any part of the Derivative Works, in at least one
|
||||||
|
of the following places: within a NOTICE text file distributed
|
||||||
|
as part of the Derivative Works; within the Source form or
|
||||||
|
documentation, if provided along with the Derivative Works; or,
|
||||||
|
within a display generated by the Derivative Works, if and
|
||||||
|
wherever such third-party notices normally appear. The contents
|
||||||
|
of the NOTICE file are for informational purposes only and
|
||||||
|
do not modify the License. You may add Your own attribution
|
||||||
|
notices within Derivative Works that You distribute, alongside
|
||||||
|
or as an addendum to the NOTICE text from the Work, provided
|
||||||
|
that such additional attribution notices cannot be construed
|
||||||
|
as modifying the License.
|
||||||
|
|
||||||
|
You may add Your own copyright statement to Your modifications and
|
||||||
|
may provide additional or different license terms and conditions
|
||||||
|
for use, reproduction, or distribution of Your modifications, or
|
||||||
|
for any such Derivative Works as a whole, provided Your use,
|
||||||
|
reproduction, and distribution of the Work otherwise complies with
|
||||||
|
the conditions stated in this License.
|
||||||
|
|
||||||
|
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||||
|
any Contribution intentionally submitted for inclusion in the Work
|
||||||
|
by You to the Licensor shall be under the terms and conditions of
|
||||||
|
this License, without any additional terms or conditions.
|
||||||
|
Notwithstanding the above, nothing herein shall supersede or modify
|
||||||
|
the terms of any separate license agreement you may have executed
|
||||||
|
with Licensor regarding such Contributions.
|
||||||
|
|
||||||
|
6. Trademarks. This License does not grant permission to use the trade
|
||||||
|
names, trademarks, service marks, or product names of the Licensor,
|
||||||
|
except as required for reasonable and customary use in describing the
|
||||||
|
origin of the Work and reproducing the content of the NOTICE file.
|
||||||
|
|
||||||
|
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||||
|
agreed to in writing, Licensor provides the Work (and each
|
||||||
|
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||||
|
implied, including, without limitation, any warranties or conditions
|
||||||
|
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||||
|
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||||
|
appropriateness of using or redistributing the Work and assume any
|
||||||
|
risks associated with Your exercise of permissions under this License.
|
||||||
|
|
||||||
|
8. Limitation of Liability. In no event and under no legal theory,
|
||||||
|
whether in tort (including negligence), contract, or otherwise,
|
||||||
|
unless required by applicable law (such as deliberate and grossly
|
||||||
|
negligent acts) or agreed to in writing, shall any Contributor be
|
||||||
|
liable to You for damages, including any direct, indirect, special,
|
||||||
|
incidental, or consequential damages of any character arising as a
|
||||||
|
result of this License or out of the use or inability to use the
|
||||||
|
Work (including but not limited to damages for loss of goodwill,
|
||||||
|
work stoppage, computer failure or malfunction, or any and all
|
||||||
|
other commercial damages or losses), even if such Contributor
|
||||||
|
has been advised of the possibility of such damages.
|
||||||
|
|
||||||
|
9. Accepting Warranty or Additional Liability. While redistributing
|
||||||
|
the Work or Derivative Works thereof, You may choose to offer,
|
||||||
|
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||||
|
or other liability obligations and/or rights consistent with this
|
||||||
|
License. However, in accepting such obligations, You may act only
|
||||||
|
on Your own behalf and on Your sole responsibility, not on behalf
|
||||||
|
of any other Contributor, and only if You agree to indemnify,
|
||||||
|
defend, and hold each Contributor harmless for any liability
|
||||||
|
incurred by, or claims asserted against, such Contributor by reason
|
||||||
|
of your accepting any such warranty or additional liability.
|
||||||
|
|
||||||
|
END OF TERMS AND CONDITIONS
|
||||||
|
|
||||||
|
APPENDIX: How to apply the Apache License to your work.
|
||||||
|
|
||||||
|
To apply the Apache License to your work, attach the following
|
||||||
|
boilerplate notice, with the fields enclosed by brackets "{}"
|
||||||
|
replaced with your own identifying information. (Don't include
|
||||||
|
the brackets!) The text should be enclosed in the appropriate
|
||||||
|
comment syntax for the file format. We also recommend that a
|
||||||
|
file or class name and description of purpose be included on the
|
||||||
|
same "printed page" as the copyright notice for easier
|
||||||
|
identification within third-party archives.
|
||||||
|
|
||||||
|
Copyright {yyyy} {name of copyright owner}
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
|
4 vendor/github.com/petermattis/goid/README.md generated vendored Normal file
@@ -0,0 +1,4 @@
# goid [![Build Status](https://travis-ci.org/petermattis/goid.svg?branch=master)](https://travis-ci.org/petermattis/goid)

Programmatically retrieve the current goroutine's ID. See [the CI
configuration](.travis.yml) for supported Go versions.
35 vendor/github.com/petermattis/goid/goid.go generated vendored Normal file
@@ -0,0 +1,35 @@
// Copyright 2016 Peter Mattis.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the License. See the AUTHORS file
// for names of contributors.

package goid

import (
	"bytes"
	"runtime"
	"strconv"
)

func ExtractGID(s []byte) int64 {
	s = s[len("goroutine "):]
	s = s[:bytes.IndexByte(s, ' ')]
	gid, _ := strconv.ParseInt(string(s), 10, 64)
	return gid
}

// Parse the goid from runtime.Stack() output. Slow, but it works.
func getSlow() int64 {
	var buf [64]byte
	return ExtractGID(buf[:runtime.Stack(buf[:], false)])
}
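getSlow is the portable fallback used when none of the assembly-backed Get implementations in the build-tagged files below apply; it parses the text header that runtime.Stack produces. For illustration (the stack header string is a typical example, not captured output):

package main

import (
	"fmt"

	"github.com/petermattis/goid"
)

func main() {
	// ExtractGID parses headers of the form "goroutine 18 [running]:".
	fmt.Println(goid.ExtractGID([]byte("goroutine 18 [running]:"))) // 18

	// Get returns the id of the goroutine that calls it.
	fmt.Println(goid.Get())
}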
23 vendor/github.com/petermattis/goid/goid_go1.3.c generated vendored Normal file
@@ -0,0 +1,23 @@
|
||||||
|
// Copyright 2015 Peter Mattis.
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||||
|
// implied. See the License for the specific language governing
|
||||||
|
// permissions and limitations under the License. See the AUTHORS file
|
||||||
|
// for names of contributors.
|
||||||
|
|
||||||
|
// +build !go1.4
|
||||||
|
|
||||||
|
#include <runtime.h>
|
||||||
|
|
||||||
|
void ·Get(int64 ret) {
|
||||||
|
ret = g->goid;
|
||||||
|
USED(&ret);
|
||||||
|
}
21 vendor/github.com/petermattis/goid/goid_go1.3.go generated vendored Normal file
@@ -0,0 +1,21 @@
|
||||||
|
// Copyright 2015 Peter Mattis.
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||||
|
// implied. See the License for the specific language governing
|
||||||
|
// permissions and limitations under the License. See the AUTHORS file
|
||||||
|
// for names of contributors.
|
||||||
|
|
||||||
|
// +build !go1.4
|
||||||
|
|
||||||
|
package goid
|
||||||
|
|
||||||
|
// Get returns the id of the current goroutine.
|
||||||
|
func Get() int64
34 vendor/github.com/petermattis/goid/goid_go1.4.go generated vendored Normal file
@@ -0,0 +1,34 @@
|
||||||
|
// Copyright 2015 Peter Mattis.
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||||
|
// implied. See the License for the specific language governing
|
||||||
|
// permissions and limitations under the License. See the AUTHORS file
|
||||||
|
// for names of contributors.
|
||||||
|
|
||||||
|
// +build go1.4,!go1.5
|
||||||
|
|
||||||
|
package goid
|
||||||
|
|
||||||
|
import "unsafe"
|
||||||
|
|
||||||
|
var pointerSize = unsafe.Sizeof(uintptr(0))
|
||||||
|
|
||||||
|
// Backdoor access to runtime·getg().
|
||||||
|
func getg() uintptr // in goid_go1.4.s
|
||||||
|
|
||||||
|
// Get returns the id of the current goroutine.
|
||||||
|
func Get() int64 {
|
||||||
|
// The goid is the 16th field in the G struct where each field is a
|
||||||
|
// pointer, uintptr or padded to that size. See runtime.h from the
|
||||||
|
// Go sources. I'm not aware of a cleaner way to determine the
|
||||||
|
// offset.
|
||||||
|
return *(*int64)(unsafe.Pointer(getg() + 16*pointerSize))
|
||||||
|
}
18 vendor/github.com/petermattis/goid/goid_go1.4.s generated vendored Normal file
@@ -0,0 +1,18 @@
|
||||||
|
// Copyright 2014 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// Assembly to get into package runtime without using exported symbols.
|
||||||
|
// See https://github.com/golang/go/blob/release-branch.go1.4/misc/cgo/test/backdoor/thunk.s
|
||||||
|
|
||||||
|
// +build amd64 amd64p32 arm 386
|
||||||
|
// +build go1.4,!go1.5
|
||||||
|
|
||||||
|
#include "textflag.h"
|
||||||
|
|
||||||
|
#ifdef GOARCH_arm
|
||||||
|
#define JMP B
|
||||||
|
#endif
|
||||||
|
|
||||||
|
TEXT ·getg(SB),NOSPLIT,$0-0
|
||||||
|
JMP runtime·getg(SB)
67 vendor/github.com/petermattis/goid/goid_go1.5.go generated vendored Normal file
@@ -0,0 +1,67 @@
|
||||||
|
// Copyright 2016 Peter Mattis.
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||||
|
// implied. See the License for the specific language governing
|
||||||
|
// permissions and limitations under the License. See the AUTHORS file
|
||||||
|
// for names of contributors.
|
||||||
|
|
||||||
|
// +build amd64 amd64p32 arm
|
||||||
|
// +build go1.5,!go1.6
|
||||||
|
|
||||||
|
package goid
|
||||||
|
|
||||||
|
import "unsafe"
|
||||||
|
|
||||||
|
// Just enough of the structs from runtime/runtime2.go to get the offset to goid.
|
||||||
|
// See https://github.com/golang/go/blob/release-branch.go1.5/src/runtime/runtime2.go
|
||||||
|
|
||||||
|
type stack struct {
|
||||||
|
lo uintptr
|
||||||
|
hi uintptr
|
||||||
|
}
|
||||||
|
|
||||||
|
type gobuf struct {
|
||||||
|
sp uintptr
|
||||||
|
pc uintptr
|
||||||
|
g uintptr
|
||||||
|
ctxt uintptr
|
||||||
|
ret uintptr
|
||||||
|
lr uintptr
|
||||||
|
bp uintptr
|
||||||
|
}
|
||||||
|
|
||||||
|
type g struct {
|
||||||
|
stack stack
|
||||||
|
stackguard0 uintptr
|
||||||
|
stackguard1 uintptr
|
||||||
|
|
||||||
|
_panic uintptr
|
||||||
|
_defer uintptr
|
||||||
|
m uintptr
|
||||||
|
stackAlloc uintptr
|
||||||
|
sched gobuf
|
||||||
|
syscallsp uintptr
|
||||||
|
syscallpc uintptr
|
||||||
|
stkbar []uintptr
|
||||||
|
stkbarPos uintptr
|
||||||
|
param unsafe.Pointer
|
||||||
|
atomicstatus uint32
|
||||||
|
stackLock uint32
|
||||||
|
goid int64 // Here it is!
|
||||||
|
}
|
||||||
|
|
||||||
|
// Backdoor access to runtime·getg().
|
||||||
|
func getg() uintptr // in goid_go1.5plus.s
|
||||||
|
|
||||||
|
func Get() int64 {
|
||||||
|
gg := (*g)(unsafe.Pointer(getg()))
|
||||||
|
return gg.goid
|
||||||
|
}
27 vendor/github.com/petermattis/goid/goid_go1.5plus.s generated vendored Normal file
@@ -0,0 +1,27 @@
|
||||||
|
// Copyright 2016 Peter Mattis.
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||||
|
// implied. See the License for the specific language governing
|
||||||
|
// permissions and limitations under the License. See the AUTHORS file
|
||||||
|
// for names of contributors.
|
||||||
|
|
||||||
|
// Assembly to mimic runtime.getg.
|
||||||
|
|
||||||
|
// +build amd64 amd64p32
|
||||||
|
// +build go1.5
|
||||||
|
|
||||||
|
#include "textflag.h"
|
||||||
|
|
||||||
|
// func getg() uintptr
|
||||||
|
TEXT ·getg(SB),NOSPLIT,$0-8
|
||||||
|
MOVQ (TLS), BX
|
||||||
|
MOVQ BX, ret+0(FP)
|
||||||
|
RET
27 vendor/github.com/petermattis/goid/goid_go1.5plus_arm.s generated vendored Normal file
@@ -0,0 +1,27 @@
|
||||||
|
// Copyright 2016 Peter Mattis.
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||||
|
// implied. See the License for the specific language governing
|
||||||
|
// permissions and limitations under the License. See the AUTHORS file
|
||||||
|
// for names of contributors.
|
||||||
|
|
||||||
|
// Assembly to mimic runtime.getg.
|
||||||
|
// This should work on arm64 as well, but it hasn't been tested.
|
||||||
|
|
||||||
|
// +build arm
|
||||||
|
// +build go1.5
|
||||||
|
|
||||||
|
#include "textflag.h"
|
||||||
|
|
||||||
|
// func getg() uintptr
|
||||||
|
TEXT ·getg(SB),NOSPLIT,$0-8
|
||||||
|
MOVW g, ret+0(FP)
|
||||||
|
RET
53 vendor/github.com/petermattis/goid/goid_go1.6plus.go generated vendored Normal file
@@ -0,0 +1,53 @@
|
||||||
|
// +build amd64 amd64p32 arm
|
||||||
|
// +build go1.6,!go1.9
|
||||||
|
|
||||||
|
package goid
|
||||||
|
|
||||||
|
import "unsafe"
|
||||||
|
|
||||||
|
// Just enough of the structs from runtime/runtime2.go to get the offset to goid.
|
||||||
|
// See https://github.com/golang/go/blob/release-branch.go1.6/src/runtime/runtime2.go
|
||||||
|
|
||||||
|
type stack struct {
|
||||||
|
lo uintptr
|
||||||
|
hi uintptr
|
||||||
|
}
|
||||||
|
|
||||||
|
type gobuf struct {
|
||||||
|
sp uintptr
|
||||||
|
pc uintptr
|
||||||
|
g uintptr
|
||||||
|
ctxt uintptr
|
||||||
|
ret uintptr
|
||||||
|
lr uintptr
|
||||||
|
bp uintptr
|
||||||
|
}
|
||||||
|
|
||||||
|
type g struct {
|
||||||
|
stack stack
|
||||||
|
stackguard0 uintptr
|
||||||
|
stackguard1 uintptr
|
||||||
|
|
||||||
|
_panic uintptr
|
||||||
|
_defer uintptr
|
||||||
|
m uintptr
|
||||||
|
stackAlloc uintptr
|
||||||
|
sched gobuf
|
||||||
|
syscallsp uintptr
|
||||||
|
syscallpc uintptr
|
||||||
|
stkbar []uintptr
|
||||||
|
stkbarPos uintptr
|
||||||
|
stktopsp uintptr
|
||||||
|
param unsafe.Pointer
|
||||||
|
atomicstatus uint32
|
||||||
|
stackLock uint32
|
||||||
|
goid int64 // Here it is!
|
||||||
|
}
|
||||||
|
|
||||||
|
// Backdoor access to runtime·getg().
|
||||||
|
func getg() uintptr // in goid_go1.5plus{,_arm}.s
|
||||||
|
|
||||||
|
func Get() int64 {
|
||||||
|
gg := (*g)(unsafe.Pointer(getg()))
|
||||||
|
return gg.goid
|
||||||
|
}
47 vendor/github.com/petermattis/goid/goid_go1.9plus.go generated vendored Normal file
@@ -0,0 +1,47 @@
// +build amd64 amd64p32 arm
// +build go1.9

package goid

import "unsafe"

type stack struct {
    lo uintptr
    hi uintptr
}

type gobuf struct {
    sp   uintptr
    pc   uintptr
    g    uintptr
    ctxt uintptr
    ret  uintptr
    lr   uintptr
    bp   uintptr
}

type g struct {
    stack       stack
    stackguard0 uintptr
    stackguard1 uintptr

    _panic       uintptr
    _defer       uintptr
    m            uintptr
    sched        gobuf
    syscallsp    uintptr
    syscallpc    uintptr
    stktopsp     uintptr
    param        unsafe.Pointer
    atomicstatus uint32
    stackLock    uint32
    goid         int64 // Here it is!
}

// Backdoor access to runtime·getg().
func getg() uintptr // in goid_go1.5plus{,_arm}.s

func Get() int64 {
    gg := (*g)(unsafe.Pointer(getg()))
    return gg.goid
}
23 vendor/github.com/petermattis/goid/goid_slow.go generated vendored Normal file
@@ -0,0 +1,23 @@
// Copyright 2016 Peter Mattis.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the License. See the AUTHORS file
// for names of contributors.

// +build go1.4,!go1.5,!amd64,!amd64p32,!arm,!386 go1.5,!go1.6,!amd64,!amd64p32,!arm go1.6,!amd64,!amd64p32,!arm go1.9,!amd64,!amd64p32,!arm

package goid

// Get returns the id of the current goroutine.
func Get() int64 {
    return getSlow()
}
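For orientation only, a minimal usage sketch of the goid package added above; it is not part of the vendored diff and relies only on the exported Get shown in these files (the import path is the package's upstream path).

```go
package main

import (
    "fmt"

    "github.com/petermattis/goid"
)

func main() {
    // Get returns the id of the calling goroutine, via the struct-offset
    // fast path above on amd64/arm or via getSlow elsewhere.
    fmt.Println("running on goroutine", goid.Get())
}
```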
201 vendor/github.com/sasha-s/go-deadlock/LICENSE generated vendored Normal file
@@ -0,0 +1,201 @@
                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "{}"
      replaced with your own identifying information. (Don't include
      the brackets!) The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright {yyyy} {name of copyright owner}

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
112 vendor/github.com/sasha-s/go-deadlock/Readme.md generated vendored Normal file
@@ -0,0 +1,112 @@
# Online deadlock detection in go (golang). [Docs](https://godoc.org/github.com/sasha-s/go-deadlock). [![Build Status](https://travis-ci.org/sasha-s/go-deadlock.svg?branch=master)](https://travis-ci.org/sasha-s/go-deadlock)
## Why
Deadlocks happen and are painful to debug.

## What
go-deadlock provides (RW)Mutex drop-in replacements for sync.(RW)Mutex.
It would not work if you create a spaghetti of channels.
Mutexes only.

## Installation
```sh
go get github.com/sasha-s/go-deadlock/...
```

## Usage
```go
import "github.com/sasha-s/go-deadlock"
var mu deadlock.Mutex
// Use normally, it works exactly like sync.Mutex does.
mu.Lock()

defer mu.Unlock()
// Or
var rw deadlock.RWMutex
rw.RLock()
defer rw.RUnlock()
```

### Deadlocks
One of the most common sources of deadlocks is inconsistent lock ordering:
say, you have two mutexes A and B, and in some goroutines you have
```go
A.Lock() // defer A.Unlock() or similar.
...
B.Lock() // defer B.Unlock() or similar.
```
And in another goroutine the order of locks is reversed:
```go
B.Lock() // defer B.Unlock() or similar.
...
A.Lock() // defer A.Unlock() or similar.
```
This does not guarantee a deadlock (maybe the goroutines above can never be running at the same time), but it is usually a design flaw at least.

go-deadlock can detect such cases (unless you cross a goroutine boundary - say, lock A, then spawn a goroutine, block until it signals, and lock B inside of the goroutine), even if the deadlock itself happens very infrequently and is painful to reproduce!

Each time go-deadlock sees a lock attempt for lock B, it records the order A before B, for each lock that is currently being held in the same goroutine, and it prints (and exits the program by default) when it sees the locking order being violated.

In addition, if it sees that we are waiting on a lock for a long time (opts.DeadlockTimeout, 30 seconds by default), it reports a potential deadlock, also printing the stacktrace for a goroutine that is currently holding the lock we are desperately trying to grab.

## Sample output
#### Inconsistent lock ordering:
```
POTENTIAL DEADLOCK: Inconsistent locking. saw this ordering in one goroutine:
happened before
inmem.go:623 bttest.(*server).ReadModifyWriteRow { r.mu.Lock() } <<<<<
inmem_test.go:118 bttest.TestConcurrentMutationsReadModifyAndGC.func4 { _, _ = s.ReadModifyWriteRow(ctx, rmw()) }

happened after
inmem.go:629 bttest.(*server).ReadModifyWriteRow { tbl.mu.RLock() } <<<<<
inmem_test.go:118 bttest.TestConcurrentMutationsReadModifyAndGC.func4 { _, _ = s.ReadModifyWriteRow(ctx, rmw()) }

in another goroutine: happened before
inmem.go:799 bttest.(*table).gc { t.mu.RLock() } <<<<<
inmem_test.go:125 bttest.TestConcurrentMutationsReadModifyAndGC.func5 { tbl.gc() }

happend after
inmem.go:814 bttest.(*table).gc { r.mu.Lock() } <<<<<
inmem_test.go:125 bttest.TestConcurrentMutationsReadModifyAndGC.func5 { tbl.gc() }
```

#### Waiting for a lock for a long time:

```
POTENTIAL DEADLOCK:
Previous place where the lock was grabbed
goroutine 240 lock 0xc820160440
inmem.go:799 bttest.(*table).gc { t.mu.RLock() } <<<<<
inmem_test.go:125 bttest.TestConcurrentMutationsReadModifyAndGC.func5 { tbl.gc() }

Have been trying to lock it again for more than 40ms
goroutine 68 lock 0xc820160440
inmem.go:785 bttest.(*table).mutableRow { t.mu.Lock() } <<<<<
inmem.go:428 bttest.(*server).MutateRow { r := tbl.mutableRow(string(req.RowKey)) }
inmem_test.go:111 bttest.TestConcurrentMutationsReadModifyAndGC.func3 { s.MutateRow(ctx, req) }


Here is what goroutine 240 doing now
goroutine 240 [select]:
github.com/sasha-s/go-deadlock.lock(0xc82028ca10, 0x5189e0, 0xc82013a9b0)
    /Users/sasha/go/src/github.com/sasha-s/go-deadlock/deadlock.go:163 +0x1640
github.com/sasha-s/go-deadlock.(*Mutex).Lock(0xc82013a9b0)
    /Users/sasha/go/src/github.com/sasha-s/go-deadlock/deadlock.go:54 +0x86
google.golang.org/cloud/bigtable/bttest.(*table).gc(0xc820160440)
    /Users/sasha/go/src/google.golang.org/cloud/bigtable/bttest/inmem.go:814 +0x28d
google.golang.org/cloud/bigtable/bttest.TestConcurrentMutationsReadModifyAndGC.func5(0xc82015c760, 0xc820160440) /Users/sasha/go/src/google.golang.org/cloud/bigtable/bttest/inmem_test.go:125 +0x48
created by google.golang.org/cloud/bigtable/bttest.TestConcurrentMutationsReadModifyAndGC
    /Users/sasha/go/src/google.golang.org/cloud/bigtable/bttest/inmem_test.go:126 +0xb6f
```

## Used in
[cockroachdb: Potential deadlock between Gossip.SetStorage and Node.gossipStores](https://github.com/cockroachdb/cockroach/issues/7972)

[bigtable/bttest: A race between GC and row mutations](https://code-review.googlesource.com#/c/5301/)

## Need a mutex that works with net.context?
I have [one](https://github.com/sasha-s/go-csync).

## Warning: Black magic inside
go-deadlock is parsing goroutine ID from stacktraces for fun and profit.
See [Goroutine IDs](http://blog.sgmansfield.com/2015/12/goroutine-ids/) blog post by [Scott Mansfield](http://blog.sgmansfield.com).
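To make the inconsistent-ordering case described in the Readme above concrete, a minimal self-contained sketch (not part of the vendored files) that triggers the "Inconsistent locking" report; the only assumption is that the package is importable under its upstream path, and note that the default handler then exits the process with status 2.

```go
package main

import "github.com/sasha-s/go-deadlock"

// aThenB and bThenA take the same pair of locks in opposite order.
// Run sequentially there is no real deadlock, but go-deadlock records
// "a before b" during the first call and reports the violation during
// the second one.
func aThenB(a, b *deadlock.Mutex) {
    a.Lock()
    defer a.Unlock()
    b.Lock()
    defer b.Unlock()
}

func bThenA(a, b *deadlock.Mutex) {
    b.Lock()
    defer b.Unlock()
    a.Lock()
    defer a.Unlock()
}

func main() {
    var a, b deadlock.Mutex
    aThenB(&a, &b)
    bThenA(&a, &b) // prints the "Inconsistent locking" report shown above
}
```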
297 vendor/github.com/sasha-s/go-deadlock/deadlock.go generated vendored Normal file
@@ -0,0 +1,297 @@
package deadlock

import (
    "bytes"
    "fmt"
    "io"
    "os"
    "sync"
    "time"

    "github.com/petermattis/goid"
)

// Opts control how deadlock detection behaves.
// Options are supposed to be set once at startup (say, when parsing flags).
var Opts = struct {
    // Mutex/RWMutex would work exactly as their sync counterparts
    // -- almost no runtime penalty, no deadlock detection if Disable == true.
    Disable bool
    // Would disable lock order based deadlock detection if DisableLockOrderDetection == true.
    DisableLockOrderDetection bool
    // Waiting for a lock for longer than DeadlockTimeout is considered a deadlock.
    // Ignored if DeadlockTimeout <= 0.
    DeadlockTimeout time.Duration
    // OnPotentialDeadlock is called each time a potential deadlock is detected -- either based on
    // lock order or on lock wait time.
    OnPotentialDeadlock func()
    // Will keep MaxMapSize lock pairs (happens before // happens after) in the map.
    // The map resets once the threshold is reached.
    MaxMapSize int
    // Will print deadlock info to the log buffer.
    LogBuf io.Writer
}{
    DeadlockTimeout: time.Second * 30,
    OnPotentialDeadlock: func() {
        os.Exit(2)
    },
    MaxMapSize: 1024 * 64,
    LogBuf:     os.Stderr,
}

// A Mutex is a drop-in replacement for sync.Mutex.
// Performs deadlock detection unless disabled in Opts.
type Mutex struct {
    mu sync.Mutex
}

// Lock locks the mutex.
// If the lock is already in use, the calling goroutine
// blocks until the mutex is available.
//
// Unless deadlock detection is disabled, logs potential deadlocks to stderr,
// calling Opts.OnPotentialDeadlock on each occasion.
func (m *Mutex) Lock() {
    lock(m.mu.Lock, m)
}

// Unlock unlocks the mutex.
// It is a run-time error if m is not locked on entry to Unlock.
//
// A locked Mutex is not associated with a particular goroutine.
// It is allowed for one goroutine to lock a Mutex and then
// arrange for another goroutine to unlock it.
func (m *Mutex) Unlock() {
    m.mu.Unlock()
    if !Opts.Disable {
        PostUnlock(m)
    }
}

// An RWMutex is a drop-in replacement for sync.RWMutex.
// Performs deadlock detection unless disabled in Opts.
type RWMutex struct {
    mu sync.RWMutex
}

// Lock locks rw for writing.
// If the lock is already locked for reading or writing,
// Lock blocks until the lock is available.
// To ensure that the lock eventually becomes available,
// a blocked Lock call excludes new readers from acquiring
// the lock.
//
// Unless deadlock detection is disabled, logs potential deadlocks to stderr,
// calling Opts.OnPotentialDeadlock on each occasion.
func (m *RWMutex) Lock() {
    lock(m.mu.Lock, m)
}

// Unlock unlocks the mutex for writing. It is a run-time error if rw is
// not locked for writing on entry to Unlock.
//
// As with Mutexes, a locked RWMutex is not associated with a particular
// goroutine. One goroutine may RLock (Lock) an RWMutex and then
// arrange for another goroutine to RUnlock (Unlock) it.
func (m *RWMutex) Unlock() {
    m.mu.Unlock()
    if !Opts.Disable {
        PostUnlock(m)
    }
}

// RLock locks the mutex for reading.
//
// Unless deadlock detection is disabled, logs potential deadlocks to stderr,
// calling Opts.OnPotentialDeadlock on each occasion.
func (m *RWMutex) RLock() {
    lock(m.mu.RLock, m)
}

// RUnlock undoes a single RLock call;
// it does not affect other simultaneous readers.
// It is a run-time error if rw is not locked for reading
// on entry to RUnlock.
func (m *RWMutex) RUnlock() {
    if !Opts.Disable {
        PostUnlock(m)
    }
    m.mu.RUnlock()
}

// RLocker returns a Locker interface that implements
// the Lock and Unlock methods by calling RLock and RUnlock.
func (m *RWMutex) RLocker() sync.Locker {
    return (*rlocker)(m)
}

func PreLock(skip int, p interface{}) {
    lo.PreLock(skip, p)
}

func PostLock(skip int, p interface{}) {
    lo.PostLock(skip, p)
}

func PostUnlock(p interface{}) {
    lo.PostUnlock(p)
}

func lock(lockFn func(), ptr interface{}) {
    if Opts.Disable {
        lockFn()
        return
    }
    PreLock(4, ptr)
    if Opts.DeadlockTimeout <= 0 {
        lockFn()
    } else {
        ch := make(chan struct{})
        go func() {
            lockFn()
            close(ch)
        }()
        for {
            t := time.NewTimer(Opts.DeadlockTimeout)
            defer t.Stop()
            select {
            case <-t.C:
                lo.mu.Lock()
                prev, ok := lo.cur[ptr]
                if !ok {
                    lo.mu.Unlock()
                    break // Nobody seems to be holding a lock, try again.
                }
                fmt.Fprintln(Opts.LogBuf, header)
                fmt.Fprintln(Opts.LogBuf, "Previous place where the lock was grabbed")
                fmt.Fprintf(Opts.LogBuf, "goroutine %v lock %p\n", prev.gid, ptr)
                printStack(Opts.LogBuf, prev.stack)
                fmt.Fprintln(Opts.LogBuf, "Have been trying to lock it again for more than", Opts.DeadlockTimeout)
                fmt.Fprintf(Opts.LogBuf, "goroutine %v lock %p\n", goid.Get(), ptr)
                printStack(Opts.LogBuf, callers(2))
                fmt.Fprintln(Opts.LogBuf)
                stacks := stacks()
                grs := bytes.Split(stacks, []byte("\n\n"))
                for _, g := range grs {
                    if goid.ExtractGID(g) == prev.gid {
                        fmt.Fprintln(Opts.LogBuf, "Here is what goroutine", prev.gid, "doing now")
                        Opts.LogBuf.Write(g)
                        fmt.Fprintln(Opts.LogBuf)
                    }
                }
                lo.other(ptr)
                fmt.Fprintln(Opts.LogBuf, "All current goroutines:")
                Opts.LogBuf.Write(stacks)
                lo.mu.Unlock()
                Opts.OnPotentialDeadlock()
                <-ch
                PostLock(4, ptr)
                return
            case <-ch:
                PostLock(4, ptr)
                return
            }
        }
    }
    PostLock(4, ptr)
}

type lockOrder struct {
    mu    sync.Mutex
    cur   map[interface{}]stackGID // stacktraces + gids for the locks currently taken.
    order map[beforeAfter]ss       // expected order of locks.
}

type stackGID struct {
    stack []uintptr
    gid   int64
}

type beforeAfter struct {
    before interface{}
    after  interface{}
}

type ss struct {
    before []uintptr
    after  []uintptr
}

var lo = newLockOrder()

func newLockOrder() *lockOrder {
    return &lockOrder{
        cur:   map[interface{}]stackGID{},
        order: map[beforeAfter]ss{},
    }
}

func (l *lockOrder) PostLock(skip int, p interface{}) {
    stack := callers(skip)
    gid := goid.Get()
    l.mu.Lock()
    l.cur[p] = stackGID{stack, gid}
    l.mu.Unlock()
}

func (l *lockOrder) PreLock(skip int, p interface{}) {
    if Opts.DisableLockOrderDetection {
        return
    }
    stack := callers(skip)
    gid := goid.Get()
    l.mu.Lock()
    for b, bs := range l.cur {
        if b == p {
            continue
        }
        if bs.gid != gid { // We want locks taken in the same goroutine only.
            continue
        }
        if s, ok := l.order[beforeAfter{p, b}]; ok {
            fmt.Fprintln(Opts.LogBuf, header, "Inconsistent locking. saw this ordering in one goroutine:")
            fmt.Fprintln(Opts.LogBuf, "happened before")
            printStack(Opts.LogBuf, s.before)
            fmt.Fprintln(Opts.LogBuf, "happened after")
            printStack(Opts.LogBuf, s.after)
            fmt.Fprintln(Opts.LogBuf, "in another goroutine: happened before")
            printStack(Opts.LogBuf, bs.stack)
            fmt.Fprintln(Opts.LogBuf, "happend after")
            printStack(Opts.LogBuf, stack)
            l.other(p)
            Opts.OnPotentialDeadlock()
        }
        l.order[beforeAfter{b, p}] = ss{bs.stack, stack}
        if len(l.order) == Opts.MaxMapSize { // Reset the map to keep memory footprint bounded.
            l.order = map[beforeAfter]ss{}
        }
    }
    l.cur[p] = stackGID{stack, gid}
    l.mu.Unlock()
}

func (l *lockOrder) PostUnlock(p interface{}) {
    l.mu.Lock()
    delete(l.cur, p)
    l.mu.Unlock()
}

type rlocker RWMutex

func (r *rlocker) Lock()   { (*RWMutex)(r).RLock() }
func (r *rlocker) Unlock() { (*RWMutex)(r).RUnlock() }

// Under lo.mu Locked.
func (l *lockOrder) other(ptr interface{}) {
    fmt.Fprintln(Opts.LogBuf, "\nOther goroutines holding locks:")
    for k, pp := range l.cur {
        if k == ptr {
            continue
        }
        fmt.Fprintf(Opts.LogBuf, "goroutine %v lock %p\n", pp.gid, k)
        printStack(Opts.LogBuf, pp.stack)
    }
    fmt.Fprintln(Opts.LogBuf)
}

const header = "POTENTIAL DEADLOCK:"
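As a usage note on the Opts block above (again, not part of the vendored file), a hedged sketch of how a consumer might tune the detector; field names are taken from the struct as vendored, and the values are illustrative only.

```go
package main

import (
    "log"
    "os"
    "time"

    "github.com/sasha-s/go-deadlock"
)

func main() {
    deadlock.Opts.DeadlockTimeout = 5 * time.Second // default is 30s
    deadlock.Opts.LogBuf = os.Stderr
    deadlock.Opts.OnPotentialDeadlock = func() {
        // The default handler calls os.Exit(2); panicking can be friendlier in tests.
        log.Panic("potential deadlock detected")
    }
    // deadlock.Opts.Disable = true would make the wrappers behave like plain sync mutexes.

    var mu deadlock.RWMutex
    mu.RLock()
    defer mu.RUnlock()
}
```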
107 vendor/github.com/sasha-s/go-deadlock/stacktraces.go generated vendored Normal file
@@ -0,0 +1,107 @@
package deadlock

import (
    "bytes"
    "fmt"
    "io"
    "io/ioutil"
    "os"
    "os/user"
    "path/filepath"
    "runtime"
    "strings"
    "sync"
)

func callers(skip int) []uintptr {
    s := make([]uintptr, 50) // Most relevant context seems to appear near the top of the stack.
    return s[:runtime.Callers(2+skip, s)]
}

func printStack(w io.Writer, stack []uintptr) {
    home := os.Getenv("HOME")
    usr, err := user.Current()
    if err == nil {
        home = usr.HomeDir
    }
    cwd, _ := os.Getwd()

    for i, pc := range stack {
        f := runtime.FuncForPC(pc)
        name := f.Name()
        pkg := ""
        if pos := strings.LastIndex(name, "/"); pos >= 0 {
            name = name[pos+1:]
        }
        if pos := strings.Index(name, "."); pos >= 0 {
            pkg = name[:pos]
            name = name[pos+1:]
        }
        file, line := f.FileLine(pc - 1)
        if (pkg == "runtime" && name == "goexit") || (pkg == "testing" && name == "tRunner") {
            fmt.Fprintln(w)
            return
        }
        tail := ""
        if i == 0 {
            tail = " <<<<<" // Make the line performing a lock prominent.
        }
        // Shorten the file name.
        clean := file
        if cwd != "" {
            cl, err := filepath.Rel(cwd, file)
            if err == nil {
                clean = cl
            }
        }
        if home != "" {
            s2 := strings.Replace(file, home, "~", 1)
            if len(clean) > len(s2) {
                clean = s2
            }
        }
        fmt.Fprintf(w, "%s:%d %s.%s %s%s\n", clean, line, pkg, name, code(file, line), tail)
    }
    fmt.Fprintln(w)
}

var fileSources struct {
    sync.Mutex
    lines map[string][][]byte
}

// Reads source file lines from disk if not cached already.
func getSourceLines(file string) [][]byte {
    fileSources.Lock()
    defer fileSources.Unlock()
    if fileSources.lines == nil {
        fileSources.lines = map[string][][]byte{}
    }
    if lines, ok := fileSources.lines[file]; ok {
        return lines
    }
    text, _ := ioutil.ReadFile(file)
    fileSources.lines[file] = bytes.Split(text, []byte{'\n'})
    return fileSources.lines[file]
}

func code(file string, line int) string {
    lines := getSourceLines(file)
    // lines are 1 based.
    if line >= len(lines) || line <= 0 {
        return "???"
    }
    return "{ " + string(bytes.TrimSpace(lines[line-1])) + " }"
}

// Stacktraces for all goroutines.
func stacks() []byte {
    buf := make([]byte, 1024*16)
    for {
        n := runtime.Stack(buf, true)
        if n < len(buf) {
            return buf[:n]
        }
        buf = make([]byte, 2*len(buf))
    }
}
641 vendor/golang.org/x/net/http2/ciphers.go generated vendored Normal file
@@ -0,0 +1,641 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package http2

// A list of the possible cipher suite ids. Taken from
// http://www.iana.org/assignments/tls-parameters/tls-parameters.txt

const (
    cipher_TLS_NULL_WITH_NULL_NULL uint16 = 0x0000
    cipher_TLS_RSA_WITH_NULL_MD5 uint16 = 0x0001
    cipher_TLS_RSA_WITH_NULL_SHA uint16 = 0x0002
    cipher_TLS_RSA_EXPORT_WITH_RC4_40_MD5 uint16 = 0x0003
    cipher_TLS_RSA_WITH_RC4_128_MD5 uint16 = 0x0004
    cipher_TLS_RSA_WITH_RC4_128_SHA uint16 = 0x0005
    cipher_TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5 uint16 = 0x0006
    cipher_TLS_RSA_WITH_IDEA_CBC_SHA uint16 = 0x0007
    cipher_TLS_RSA_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0008
    cipher_TLS_RSA_WITH_DES_CBC_SHA uint16 = 0x0009
    cipher_TLS_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0x000A
    cipher_TLS_DH_DSS_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x000B
    cipher_TLS_DH_DSS_WITH_DES_CBC_SHA uint16 = 0x000C
    cipher_TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA uint16 = 0x000D
    cipher_TLS_DH_RSA_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x000E
    cipher_TLS_DH_RSA_WITH_DES_CBC_SHA uint16 = 0x000F
    cipher_TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0x0010
    cipher_TLS_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0011
    cipher_TLS_DHE_DSS_WITH_DES_CBC_SHA uint16 = 0x0012
    cipher_TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA uint16 = 0x0013
    cipher_TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0014
    cipher_TLS_DHE_RSA_WITH_DES_CBC_SHA uint16 = 0x0015
    cipher_TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0x0016
    cipher_TLS_DH_anon_EXPORT_WITH_RC4_40_MD5 uint16 = 0x0017
    cipher_TLS_DH_anon_WITH_RC4_128_MD5 uint16 = 0x0018
    cipher_TLS_DH_anon_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0019
    cipher_TLS_DH_anon_WITH_DES_CBC_SHA uint16 = 0x001A
    cipher_TLS_DH_anon_WITH_3DES_EDE_CBC_SHA uint16 = 0x001B
    // Reserved uint16 = 0x001C-1D
    cipher_TLS_KRB5_WITH_DES_CBC_SHA uint16 = 0x001E
    cipher_TLS_KRB5_WITH_3DES_EDE_CBC_SHA uint16 = 0x001F
    cipher_TLS_KRB5_WITH_RC4_128_SHA uint16 = 0x0020
    cipher_TLS_KRB5_WITH_IDEA_CBC_SHA uint16 = 0x0021
    cipher_TLS_KRB5_WITH_DES_CBC_MD5 uint16 = 0x0022
    cipher_TLS_KRB5_WITH_3DES_EDE_CBC_MD5 uint16 = 0x0023
    cipher_TLS_KRB5_WITH_RC4_128_MD5 uint16 = 0x0024
    cipher_TLS_KRB5_WITH_IDEA_CBC_MD5 uint16 = 0x0025
    cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_SHA uint16 = 0x0026
    cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_SHA uint16 = 0x0027
    cipher_TLS_KRB5_EXPORT_WITH_RC4_40_SHA uint16 = 0x0028
    cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_MD5 uint16 = 0x0029
    cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_MD5 uint16 = 0x002A
    cipher_TLS_KRB5_EXPORT_WITH_RC4_40_MD5 uint16 = 0x002B
    cipher_TLS_PSK_WITH_NULL_SHA uint16 = 0x002C
    cipher_TLS_DHE_PSK_WITH_NULL_SHA uint16 = 0x002D
    cipher_TLS_RSA_PSK_WITH_NULL_SHA uint16 = 0x002E
    cipher_TLS_RSA_WITH_AES_128_CBC_SHA uint16 = 0x002F
    cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA uint16 = 0x0030
    cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA uint16 = 0x0031
    cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA uint16 = 0x0032
    cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA uint16 = 0x0033
    cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA uint16 = 0x0034
    cipher_TLS_RSA_WITH_AES_256_CBC_SHA uint16 = 0x0035
    cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA uint16 = 0x0036
    cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA uint16 = 0x0037
    cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA uint16 = 0x0038
    cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA uint16 = 0x0039
    cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA uint16 = 0x003A
    cipher_TLS_RSA_WITH_NULL_SHA256 uint16 = 0x003B
    cipher_TLS_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0x003C
    cipher_TLS_RSA_WITH_AES_256_CBC_SHA256 uint16 = 0x003D
    cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA256 uint16 = 0x003E
    cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0x003F
    cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA256 uint16 = 0x0040
    cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0041
    cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0042
    cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0043
    cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0044
    cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0045
    cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0046
    // Reserved uint16 = 0x0047-4F
    // Reserved uint16 = 0x0050-58
    // Reserved uint16 = 0x0059-5C
    // Unassigned uint16 = 0x005D-5F
    // Reserved uint16 = 0x0060-66
    cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0x0067
    cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA256 uint16 = 0x0068
    cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA256 uint16 = 0x0069
    cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA256 uint16 = 0x006A
    cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA256 uint16 = 0x006B
    cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA256 uint16 = 0x006C
    cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA256 uint16 = 0x006D
    // Unassigned uint16 = 0x006E-83
    cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0084
    cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0085
    cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0086
    cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0087
    cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0088
    cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0089
    cipher_TLS_PSK_WITH_RC4_128_SHA uint16 = 0x008A
    cipher_TLS_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0x008B
    cipher_TLS_PSK_WITH_AES_128_CBC_SHA uint16 = 0x008C
    cipher_TLS_PSK_WITH_AES_256_CBC_SHA uint16 = 0x008D
    cipher_TLS_DHE_PSK_WITH_RC4_128_SHA uint16 = 0x008E
    cipher_TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0x008F
    cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA uint16 = 0x0090
    cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA uint16 = 0x0091
    cipher_TLS_RSA_PSK_WITH_RC4_128_SHA uint16 = 0x0092
    cipher_TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0x0093
    cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA uint16 = 0x0094
    cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA uint16 = 0x0095
    cipher_TLS_RSA_WITH_SEED_CBC_SHA uint16 = 0x0096
    cipher_TLS_DH_DSS_WITH_SEED_CBC_SHA uint16 = 0x0097
    cipher_TLS_DH_RSA_WITH_SEED_CBC_SHA uint16 = 0x0098
    cipher_TLS_DHE_DSS_WITH_SEED_CBC_SHA uint16 = 0x0099
    cipher_TLS_DHE_RSA_WITH_SEED_CBC_SHA uint16 = 0x009A
    cipher_TLS_DH_anon_WITH_SEED_CBC_SHA uint16 = 0x009B
    cipher_TLS_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0x009C
    cipher_TLS_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0x009D
    cipher_TLS_DHE_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0x009E
    cipher_TLS_DHE_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0x009F
    cipher_TLS_DH_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0x00A0
    cipher_TLS_DH_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0x00A1
    cipher_TLS_DHE_DSS_WITH_AES_128_GCM_SHA256 uint16 = 0x00A2
    cipher_TLS_DHE_DSS_WITH_AES_256_GCM_SHA384 uint16 = 0x00A3
    cipher_TLS_DH_DSS_WITH_AES_128_GCM_SHA256 uint16 = 0x00A4
    cipher_TLS_DH_DSS_WITH_AES_256_GCM_SHA384 uint16 = 0x00A5
    cipher_TLS_DH_anon_WITH_AES_128_GCM_SHA256 uint16 = 0x00A6
    cipher_TLS_DH_anon_WITH_AES_256_GCM_SHA384 uint16 = 0x00A7
    cipher_TLS_PSK_WITH_AES_128_GCM_SHA256 uint16 = 0x00A8
    cipher_TLS_PSK_WITH_AES_256_GCM_SHA384 uint16 = 0x00A9
    cipher_TLS_DHE_PSK_WITH_AES_128_GCM_SHA256 uint16 = 0x00AA
    cipher_TLS_DHE_PSK_WITH_AES_256_GCM_SHA384 uint16 = 0x00AB
    cipher_TLS_RSA_PSK_WITH_AES_128_GCM_SHA256 uint16 = 0x00AC
    cipher_TLS_RSA_PSK_WITH_AES_256_GCM_SHA384 uint16 = 0x00AD
    cipher_TLS_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0x00AE
    cipher_TLS_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0x00AF
    cipher_TLS_PSK_WITH_NULL_SHA256 uint16 = 0x00B0
    cipher_TLS_PSK_WITH_NULL_SHA384 uint16 = 0x00B1
    cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0x00B2
    cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0x00B3
    cipher_TLS_DHE_PSK_WITH_NULL_SHA256 uint16 = 0x00B4
    cipher_TLS_DHE_PSK_WITH_NULL_SHA384 uint16 = 0x00B5
    cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0x00B6
    cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0x00B7
    cipher_TLS_RSA_PSK_WITH_NULL_SHA256 uint16 = 0x00B8
    cipher_TLS_RSA_PSK_WITH_NULL_SHA384 uint16 = 0x00B9
    cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BA
    cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BB
    cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BC
    cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BD
    cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BE
    cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BF
    cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C0
    cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C1
    cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C2
    cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C3
    cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C4
    cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C5
    // Unassigned uint16 = 0x00C6-FE
    cipher_TLS_EMPTY_RENEGOTIATION_INFO_SCSV uint16 = 0x00FF
    // Unassigned uint16 = 0x01-55,*
    cipher_TLS_FALLBACK_SCSV uint16 = 0x5600
    // Unassigned uint16 = 0x5601 - 0xC000
    cipher_TLS_ECDH_ECDSA_WITH_NULL_SHA uint16 = 0xC001
    cipher_TLS_ECDH_ECDSA_WITH_RC4_128_SHA uint16 = 0xC002
    cipher_TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC003
    cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA uint16 = 0xC004
    cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA uint16 = 0xC005
    cipher_TLS_ECDHE_ECDSA_WITH_NULL_SHA uint16 = 0xC006
    cipher_TLS_ECDHE_ECDSA_WITH_RC4_128_SHA uint16 = 0xC007
    cipher_TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC008
    cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA uint16 = 0xC009
    cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA uint16 = 0xC00A
    cipher_TLS_ECDH_RSA_WITH_NULL_SHA uint16 = 0xC00B
    cipher_TLS_ECDH_RSA_WITH_RC4_128_SHA uint16 = 0xC00C
    cipher_TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC00D
    cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA uint16 = 0xC00E
    cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA uint16 = 0xC00F
    cipher_TLS_ECDHE_RSA_WITH_NULL_SHA uint16 = 0xC010
    cipher_TLS_ECDHE_RSA_WITH_RC4_128_SHA uint16 = 0xC011
    cipher_TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC012
    cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA uint16 = 0xC013
    cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA uint16 = 0xC014
    cipher_TLS_ECDH_anon_WITH_NULL_SHA uint16 = 0xC015
    cipher_TLS_ECDH_anon_WITH_RC4_128_SHA uint16 = 0xC016
    cipher_TLS_ECDH_anon_WITH_3DES_EDE_CBC_SHA uint16 = 0xC017
    cipher_TLS_ECDH_anon_WITH_AES_128_CBC_SHA uint16 = 0xC018
    cipher_TLS_ECDH_anon_WITH_AES_256_CBC_SHA uint16 = 0xC019
    cipher_TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC01A
    cipher_TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC01B
    cipher_TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA uint16 = 0xC01C
    cipher_TLS_SRP_SHA_WITH_AES_128_CBC_SHA uint16 = 0xC01D
    cipher_TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA uint16 = 0xC01E
    cipher_TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA uint16 = 0xC01F
    cipher_TLS_SRP_SHA_WITH_AES_256_CBC_SHA uint16 = 0xC020
    cipher_TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA uint16 = 0xC021
    cipher_TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA uint16 = 0xC022
    cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC023
    cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC024
    cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC025
    cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC026
    cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC027
    cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC028
    cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC029
    cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC02A
    cipher_TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC02B
    cipher_TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC02C
    cipher_TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC02D
    cipher_TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC02E
    cipher_TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC02F
    cipher_TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC030
    cipher_TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC031
    cipher_TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC032
    cipher_TLS_ECDHE_PSK_WITH_RC4_128_SHA uint16 = 0xC033
    cipher_TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0xC034
    cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA uint16 = 0xC035
    cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA uint16 = 0xC036
    cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0xC037
    cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0xC038
    cipher_TLS_ECDHE_PSK_WITH_NULL_SHA uint16 = 0xC039
    cipher_TLS_ECDHE_PSK_WITH_NULL_SHA256 uint16 = 0xC03A
    cipher_TLS_ECDHE_PSK_WITH_NULL_SHA384 uint16 = 0xC03B
    cipher_TLS_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC03C
    cipher_TLS_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC03D
    cipher_TLS_DH_DSS_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC03E
    cipher_TLS_DH_DSS_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC03F
    cipher_TLS_DH_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC040
    cipher_TLS_DH_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC041
    cipher_TLS_DHE_DSS_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC042
    cipher_TLS_DHE_DSS_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC043
    cipher_TLS_DHE_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC044
    cipher_TLS_DHE_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC045
    cipher_TLS_DH_anon_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC046
    cipher_TLS_DH_anon_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC047
    cipher_TLS_ECDHE_ECDSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC048
    cipher_TLS_ECDHE_ECDSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC049
    cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC04A
    cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC04B
    cipher_TLS_ECDHE_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC04C
    cipher_TLS_ECDHE_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC04D
    cipher_TLS_ECDH_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC04E
    cipher_TLS_ECDH_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC04F
    cipher_TLS_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC050
    cipher_TLS_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC051
    cipher_TLS_DHE_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC052
    cipher_TLS_DHE_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC053
    cipher_TLS_DH_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC054
    cipher_TLS_DH_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC055
    cipher_TLS_DHE_DSS_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC056
    cipher_TLS_DHE_DSS_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC057
    cipher_TLS_DH_DSS_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC058
    cipher_TLS_DH_DSS_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC059
    cipher_TLS_DH_anon_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC05A
    cipher_TLS_DH_anon_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC05B
    cipher_TLS_ECDHE_ECDSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC05C
    cipher_TLS_ECDHE_ECDSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC05D
    cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC05E
    cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC05F
    cipher_TLS_ECDHE_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC060
    cipher_TLS_ECDHE_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC061
    cipher_TLS_ECDH_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC062
    cipher_TLS_ECDH_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC063
    cipher_TLS_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC064
    cipher_TLS_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC065
    cipher_TLS_DHE_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC066
    cipher_TLS_DHE_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC067
    cipher_TLS_RSA_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC068
    cipher_TLS_RSA_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC069
    cipher_TLS_PSK_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC06A
    cipher_TLS_PSK_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC06B
    cipher_TLS_DHE_PSK_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC06C
    cipher_TLS_DHE_PSK_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC06D
    cipher_TLS_RSA_PSK_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC06E
    cipher_TLS_RSA_PSK_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC06F
    cipher_TLS_ECDHE_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC070
    cipher_TLS_ECDHE_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC071
    cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC072
    cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC073
    cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC074
    cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC075
    cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC076
    cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC077
    cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC078
    cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC079
    cipher_TLS_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC07A
    cipher_TLS_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC07B
    cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC07C
    cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC07D
    cipher_TLS_DH_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC07E
    cipher_TLS_DH_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC07F
    cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC080
    cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC081
    cipher_TLS_DH_DSS_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC082
    cipher_TLS_DH_DSS_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC083
    cipher_TLS_DH_anon_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC084
    cipher_TLS_DH_anon_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC085
    cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC086
    cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC087
    cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC088
    cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC089
    cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC08A
    cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC08B
    cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC08C
    cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC08D
    cipher_TLS_PSK_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC08E
    cipher_TLS_PSK_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC08F
    cipher_TLS_DHE_PSK_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC090
    cipher_TLS_DHE_PSK_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC091
    cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC092
    cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC093
    cipher_TLS_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC094
    cipher_TLS_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC095
    cipher_TLS_DHE_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC096
    cipher_TLS_DHE_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC097
    cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC098
    cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC099
    cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC09A
    cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC09B
    cipher_TLS_RSA_WITH_AES_128_CCM uint16 = 0xC09C
    cipher_TLS_RSA_WITH_AES_256_CCM uint16 = 0xC09D
    cipher_TLS_DHE_RSA_WITH_AES_128_CCM uint16 = 0xC09E
    cipher_TLS_DHE_RSA_WITH_AES_256_CCM uint16 = 0xC09F
    cipher_TLS_RSA_WITH_AES_128_CCM_8 uint16 = 0xC0A0
    cipher_TLS_RSA_WITH_AES_256_CCM_8 uint16 = 0xC0A1
    cipher_TLS_DHE_RSA_WITH_AES_128_CCM_8 uint16 = 0xC0A2
    cipher_TLS_DHE_RSA_WITH_AES_256_CCM_8 uint16 = 0xC0A3
    cipher_TLS_PSK_WITH_AES_128_CCM uint16 = 0xC0A4
    cipher_TLS_PSK_WITH_AES_256_CCM uint16 = 0xC0A5
    cipher_TLS_DHE_PSK_WITH_AES_128_CCM uint16 = 0xC0A6
    cipher_TLS_DHE_PSK_WITH_AES_256_CCM uint16 = 0xC0A7
    cipher_TLS_PSK_WITH_AES_128_CCM_8 uint16 = 0xC0A8
    cipher_TLS_PSK_WITH_AES_256_CCM_8 uint16 = 0xC0A9
    cipher_TLS_PSK_DHE_WITH_AES_128_CCM_8 uint16 = 0xC0AA
    cipher_TLS_PSK_DHE_WITH_AES_256_CCM_8 uint16 = 0xC0AB
    cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CCM uint16 = 0xC0AC
    cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CCM uint16 = 0xC0AD
    cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CCM_8 uint16 = 0xC0AE
    cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CCM_8 uint16 = 0xC0AF
    // Unassigned uint16 = 0xC0B0-FF
    // Unassigned uint16 = 0xC1-CB,*
    // Unassigned uint16 = 0xCC00-A7
    cipher_TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCA8
    cipher_TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCA9
    cipher_TLS_DHE_RSA_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAA
    cipher_TLS_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAB
    cipher_TLS_ECDHE_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAC
    cipher_TLS_DHE_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAD
    cipher_TLS_RSA_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAE
)

// isBadCipher reports whether the cipher is blacklisted by the HTTP/2 spec.
// References:
// https://tools.ietf.org/html/rfc7540#appendix-A
// Reject cipher suites from Appendix A.
// "This list includes those cipher suites that do not
// offer an ephemeral key exchange and those that are
// based on the TLS null, stream or block cipher type"
func isBadCipher(cipher uint16) bool {
    switch cipher {
    case cipher_TLS_NULL_WITH_NULL_NULL,
        cipher_TLS_RSA_WITH_NULL_MD5,
        cipher_TLS_RSA_WITH_NULL_SHA,
        cipher_TLS_RSA_EXPORT_WITH_RC4_40_MD5,
        cipher_TLS_RSA_WITH_RC4_128_MD5,
        cipher_TLS_RSA_WITH_RC4_128_SHA,
        cipher_TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5,
        cipher_TLS_RSA_WITH_IDEA_CBC_SHA,
        cipher_TLS_RSA_EXPORT_WITH_DES40_CBC_SHA,
        cipher_TLS_RSA_WITH_DES_CBC_SHA,
        cipher_TLS_RSA_WITH_3DES_EDE_CBC_SHA,
        cipher_TLS_DH_DSS_EXPORT_WITH_DES40_CBC_SHA,
        cipher_TLS_DH_DSS_WITH_DES_CBC_SHA,
        cipher_TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA,
        cipher_TLS_DH_RSA_EXPORT_WITH_DES40_CBC_SHA,
        cipher_TLS_DH_RSA_WITH_DES_CBC_SHA,
        cipher_TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA,
        cipher_TLS_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA,
        cipher_TLS_DHE_DSS_WITH_DES_CBC_SHA,
        cipher_TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA,
        cipher_TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA,
        cipher_TLS_DHE_RSA_WITH_DES_CBC_SHA,
        cipher_TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA,
        cipher_TLS_DH_anon_EXPORT_WITH_RC4_40_MD5,
        cipher_TLS_DH_anon_WITH_RC4_128_MD5,
        cipher_TLS_DH_anon_EXPORT_WITH_DES40_CBC_SHA,
        cipher_TLS_DH_anon_WITH_DES_CBC_SHA,
        cipher_TLS_DH_anon_WITH_3DES_EDE_CBC_SHA,
        cipher_TLS_KRB5_WITH_DES_CBC_SHA,
        cipher_TLS_KRB5_WITH_3DES_EDE_CBC_SHA,
        cipher_TLS_KRB5_WITH_RC4_128_SHA,
|
||||||
|
cipher_TLS_KRB5_WITH_IDEA_CBC_SHA,
|
||||||
|
cipher_TLS_KRB5_WITH_DES_CBC_MD5,
|
||||||
|
cipher_TLS_KRB5_WITH_3DES_EDE_CBC_MD5,
|
||||||
|
cipher_TLS_KRB5_WITH_RC4_128_MD5,
|
||||||
|
cipher_TLS_KRB5_WITH_IDEA_CBC_MD5,
|
||||||
|
cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_SHA,
|
||||||
|
cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_SHA,
|
||||||
|
cipher_TLS_KRB5_EXPORT_WITH_RC4_40_SHA,
|
||||||
|
cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_MD5,
|
||||||
|
cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_MD5,
|
||||||
|
cipher_TLS_KRB5_EXPORT_WITH_RC4_40_MD5,
|
||||||
|
cipher_TLS_PSK_WITH_NULL_SHA,
|
||||||
|
cipher_TLS_DHE_PSK_WITH_NULL_SHA,
|
||||||
|
cipher_TLS_RSA_PSK_WITH_NULL_SHA,
|
||||||
|
cipher_TLS_RSA_WITH_AES_128_CBC_SHA,
|
||||||
|
cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA,
|
||||||
|
cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA,
|
||||||
|
cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA,
|
||||||
|
cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA,
|
||||||
|
cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA,
|
||||||
|
cipher_TLS_RSA_WITH_AES_256_CBC_SHA,
|
||||||
|
cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA,
|
||||||
|
cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA,
|
||||||
|
cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA,
|
||||||
|
cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA,
|
||||||
|
cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA,
|
||||||
|
cipher_TLS_RSA_WITH_NULL_SHA256,
|
||||||
|
cipher_TLS_RSA_WITH_AES_128_CBC_SHA256,
|
||||||
|
cipher_TLS_RSA_WITH_AES_256_CBC_SHA256,
|
||||||
|
cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA256,
|
||||||
|
cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA256,
|
||||||
|
cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA256,
|
||||||
|
cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA,
|
||||||
|
cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA,
|
||||||
|
cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA,
|
||||||
|
cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA,
|
||||||
|
cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA,
|
||||||
|
cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA,
|
||||||
|
cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA256,
|
||||||
|
cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA256,
|
||||||
|
cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA256,
|
||||||
|
cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA256,
|
||||||
|
cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA256,
|
||||||
|
cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA256,
|
||||||
|
cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA256,
|
||||||
|
cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA,
|
||||||
|
cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA,
|
||||||
|
cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA,
|
||||||
|
cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA,
|
||||||
|
cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA,
|
||||||
|
cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA,
|
||||||
|
cipher_TLS_PSK_WITH_RC4_128_SHA,
|
||||||
|
cipher_TLS_PSK_WITH_3DES_EDE_CBC_SHA,
|
||||||
|
cipher_TLS_PSK_WITH_AES_128_CBC_SHA,
|
||||||
|
cipher_TLS_PSK_WITH_AES_256_CBC_SHA,
|
||||||
|
cipher_TLS_DHE_PSK_WITH_RC4_128_SHA,
|
||||||
|
cipher_TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA,
|
||||||
|
cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA,
|
||||||
|
cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA,
|
||||||
|
cipher_TLS_RSA_PSK_WITH_RC4_128_SHA,
|
||||||
|
cipher_TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA,
|
||||||
|
cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA,
|
||||||
|
cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA,
|
||||||
|
cipher_TLS_RSA_WITH_SEED_CBC_SHA,
|
||||||
|
cipher_TLS_DH_DSS_WITH_SEED_CBC_SHA,
|
||||||
|
cipher_TLS_DH_RSA_WITH_SEED_CBC_SHA,
|
||||||
|
cipher_TLS_DHE_DSS_WITH_SEED_CBC_SHA,
|
||||||
|
cipher_TLS_DHE_RSA_WITH_SEED_CBC_SHA,
|
||||||
|
cipher_TLS_DH_anon_WITH_SEED_CBC_SHA,
|
||||||
|
cipher_TLS_RSA_WITH_AES_128_GCM_SHA256,
|
||||||
|
cipher_TLS_RSA_WITH_AES_256_GCM_SHA384,
|
||||||
|
cipher_TLS_DH_RSA_WITH_AES_128_GCM_SHA256,
|
||||||
|
cipher_TLS_DH_RSA_WITH_AES_256_GCM_SHA384,
|
||||||
|
cipher_TLS_DH_DSS_WITH_AES_128_GCM_SHA256,
|
||||||
|
cipher_TLS_DH_DSS_WITH_AES_256_GCM_SHA384,
|
||||||
|
cipher_TLS_DH_anon_WITH_AES_128_GCM_SHA256,
|
||||||
|
cipher_TLS_DH_anon_WITH_AES_256_GCM_SHA384,
|
||||||
|
cipher_TLS_PSK_WITH_AES_128_GCM_SHA256,
|
||||||
|
cipher_TLS_PSK_WITH_AES_256_GCM_SHA384,
|
||||||
|
cipher_TLS_RSA_PSK_WITH_AES_128_GCM_SHA256,
|
||||||
|
cipher_TLS_RSA_PSK_WITH_AES_256_GCM_SHA384,
|
||||||
|
cipher_TLS_PSK_WITH_AES_128_CBC_SHA256,
|
||||||
|
cipher_TLS_PSK_WITH_AES_256_CBC_SHA384,
|
||||||
|
cipher_TLS_PSK_WITH_NULL_SHA256,
|
||||||
|
cipher_TLS_PSK_WITH_NULL_SHA384,
|
||||||
|
cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA256,
|
||||||
|
cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA384,
|
||||||
|
cipher_TLS_DHE_PSK_WITH_NULL_SHA256,
|
||||||
|
cipher_TLS_DHE_PSK_WITH_NULL_SHA384,
|
||||||
|
cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA256,
|
||||||
|
cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA384,
|
||||||
|
cipher_TLS_RSA_PSK_WITH_NULL_SHA256,
|
||||||
|
cipher_TLS_RSA_PSK_WITH_NULL_SHA384,
|
||||||
|
cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256,
|
||||||
|
cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA256,
|
||||||
|
cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA256,
|
||||||
|
cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA256,
|
||||||
|
cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256,
|
||||||
|
cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA256,
|
||||||
|
cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256,
|
||||||
|
cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA256,
|
||||||
|
cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA256,
|
||||||
|
cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA256,
|
||||||
|
cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256,
|
||||||
|
cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA256,
|
||||||
|
cipher_TLS_EMPTY_RENEGOTIATION_INFO_SCSV,
|
||||||
|
cipher_TLS_ECDH_ECDSA_WITH_NULL_SHA,
|
||||||
|
cipher_TLS_ECDH_ECDSA_WITH_RC4_128_SHA,
|
||||||
|
cipher_TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA,
|
||||||
|
cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA,
|
||||||
|
cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA,
|
||||||
|
cipher_TLS_ECDHE_ECDSA_WITH_NULL_SHA,
|
||||||
|
cipher_TLS_ECDHE_ECDSA_WITH_RC4_128_SHA,
|
||||||
|
cipher_TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA,
|
||||||
|
cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
|
||||||
|
cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
|
||||||
|
cipher_TLS_ECDH_RSA_WITH_NULL_SHA,
|
||||||
|
cipher_TLS_ECDH_RSA_WITH_RC4_128_SHA,
|
||||||
|
cipher_TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA,
|
||||||
|
cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA,
|
||||||
|
cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA,
|
||||||
|
cipher_TLS_ECDHE_RSA_WITH_NULL_SHA,
|
||||||
|
cipher_TLS_ECDHE_RSA_WITH_RC4_128_SHA,
|
||||||
|
cipher_TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,
|
||||||
|
cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
|
||||||
|
cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
|
||||||
|
cipher_TLS_ECDH_anon_WITH_NULL_SHA,
|
||||||
|
cipher_TLS_ECDH_anon_WITH_RC4_128_SHA,
|
||||||
|
cipher_TLS_ECDH_anon_WITH_3DES_EDE_CBC_SHA,
|
||||||
|
cipher_TLS_ECDH_anon_WITH_AES_128_CBC_SHA,
|
||||||
|
cipher_TLS_ECDH_anon_WITH_AES_256_CBC_SHA,
|
||||||
|
cipher_TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA,
|
||||||
|
cipher_TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA,
|
||||||
|
cipher_TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA,
|
||||||
|
cipher_TLS_SRP_SHA_WITH_AES_128_CBC_SHA,
|
||||||
|
cipher_TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA,
|
||||||
|
cipher_TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA,
|
||||||
|
cipher_TLS_SRP_SHA_WITH_AES_256_CBC_SHA,
|
||||||
|
cipher_TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA,
|
||||||
|
cipher_TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA,
|
||||||
|
cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,
|
||||||
|
cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384,
|
||||||
|
cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256,
|
||||||
|
cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384,
|
||||||
|
cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256,
|
||||||
|
cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384,
|
||||||
|
cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256,
|
||||||
|
cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384,
|
||||||
|
cipher_TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256,
|
||||||
|
cipher_TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384,
|
||||||
|
cipher_TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256,
|
||||||
|
cipher_TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384,
|
||||||
|
cipher_TLS_ECDHE_PSK_WITH_RC4_128_SHA,
|
||||||
|
cipher_TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA,
|
||||||
|
cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA,
|
||||||
|
cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA,
|
||||||
|
cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256,
|
||||||
|
cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384,
|
||||||
|
cipher_TLS_ECDHE_PSK_WITH_NULL_SHA,
|
||||||
|
cipher_TLS_ECDHE_PSK_WITH_NULL_SHA256,
|
||||||
|
cipher_TLS_ECDHE_PSK_WITH_NULL_SHA384,
|
||||||
|
cipher_TLS_RSA_WITH_ARIA_128_CBC_SHA256,
|
||||||
|
cipher_TLS_RSA_WITH_ARIA_256_CBC_SHA384,
|
||||||
|
cipher_TLS_DH_DSS_WITH_ARIA_128_CBC_SHA256,
|
||||||
|
cipher_TLS_DH_DSS_WITH_ARIA_256_CBC_SHA384,
|
||||||
|
cipher_TLS_DH_RSA_WITH_ARIA_128_CBC_SHA256,
|
||||||
|
cipher_TLS_DH_RSA_WITH_ARIA_256_CBC_SHA384,
|
||||||
|
cipher_TLS_DHE_DSS_WITH_ARIA_128_CBC_SHA256,
|
||||||
|
cipher_TLS_DHE_DSS_WITH_ARIA_256_CBC_SHA384,
|
||||||
|
cipher_TLS_DHE_RSA_WITH_ARIA_128_CBC_SHA256,
|
||||||
|
cipher_TLS_DHE_RSA_WITH_ARIA_256_CBC_SHA384,
|
||||||
|
cipher_TLS_DH_anon_WITH_ARIA_128_CBC_SHA256,
|
||||||
|
cipher_TLS_DH_anon_WITH_ARIA_256_CBC_SHA384,
|
||||||
|
cipher_TLS_ECDHE_ECDSA_WITH_ARIA_128_CBC_SHA256,
|
||||||
|
cipher_TLS_ECDHE_ECDSA_WITH_ARIA_256_CBC_SHA384,
|
||||||
|
cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_CBC_SHA256,
|
||||||
|
cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_CBC_SHA384,
|
||||||
|
cipher_TLS_ECDHE_RSA_WITH_ARIA_128_CBC_SHA256,
|
||||||
|
cipher_TLS_ECDHE_RSA_WITH_ARIA_256_CBC_SHA384,
|
||||||
|
cipher_TLS_ECDH_RSA_WITH_ARIA_128_CBC_SHA256,
|
||||||
|
cipher_TLS_ECDH_RSA_WITH_ARIA_256_CBC_SHA384,
|
||||||
|
cipher_TLS_RSA_WITH_ARIA_128_GCM_SHA256,
|
||||||
|
cipher_TLS_RSA_WITH_ARIA_256_GCM_SHA384,
|
||||||
|
cipher_TLS_DH_RSA_WITH_ARIA_128_GCM_SHA256,
|
||||||
|
cipher_TLS_DH_RSA_WITH_ARIA_256_GCM_SHA384,
|
||||||
|
cipher_TLS_DH_DSS_WITH_ARIA_128_GCM_SHA256,
|
||||||
|
cipher_TLS_DH_DSS_WITH_ARIA_256_GCM_SHA384,
|
||||||
|
cipher_TLS_DH_anon_WITH_ARIA_128_GCM_SHA256,
|
||||||
|
cipher_TLS_DH_anon_WITH_ARIA_256_GCM_SHA384,
|
||||||
|
cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_GCM_SHA256,
|
||||||
|
cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_GCM_SHA384,
|
||||||
|
cipher_TLS_ECDH_RSA_WITH_ARIA_128_GCM_SHA256,
|
||||||
|
cipher_TLS_ECDH_RSA_WITH_ARIA_256_GCM_SHA384,
|
||||||
|
cipher_TLS_PSK_WITH_ARIA_128_CBC_SHA256,
|
||||||
|
cipher_TLS_PSK_WITH_ARIA_256_CBC_SHA384,
|
||||||
|
cipher_TLS_DHE_PSK_WITH_ARIA_128_CBC_SHA256,
|
||||||
|
cipher_TLS_DHE_PSK_WITH_ARIA_256_CBC_SHA384,
|
||||||
|
cipher_TLS_RSA_PSK_WITH_ARIA_128_CBC_SHA256,
|
||||||
|
cipher_TLS_RSA_PSK_WITH_ARIA_256_CBC_SHA384,
|
||||||
|
cipher_TLS_PSK_WITH_ARIA_128_GCM_SHA256,
|
||||||
|
cipher_TLS_PSK_WITH_ARIA_256_GCM_SHA384,
|
||||||
|
cipher_TLS_RSA_PSK_WITH_ARIA_128_GCM_SHA256,
|
||||||
|
cipher_TLS_RSA_PSK_WITH_ARIA_256_GCM_SHA384,
|
||||||
|
cipher_TLS_ECDHE_PSK_WITH_ARIA_128_CBC_SHA256,
|
||||||
|
cipher_TLS_ECDHE_PSK_WITH_ARIA_256_CBC_SHA384,
|
||||||
|
cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_CBC_SHA256,
|
||||||
|
cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_CBC_SHA384,
|
||||||
|
cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_CBC_SHA256,
|
||||||
|
cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_CBC_SHA384,
|
||||||
|
cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_128_CBC_SHA256,
|
||||||
|
cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_256_CBC_SHA384,
|
||||||
|
cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_CBC_SHA256,
|
||||||
|
cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_CBC_SHA384,
|
||||||
|
cipher_TLS_RSA_WITH_CAMELLIA_128_GCM_SHA256,
|
||||||
|
cipher_TLS_RSA_WITH_CAMELLIA_256_GCM_SHA384,
|
||||||
|
cipher_TLS_DH_RSA_WITH_CAMELLIA_128_GCM_SHA256,
|
||||||
|
cipher_TLS_DH_RSA_WITH_CAMELLIA_256_GCM_SHA384,
|
||||||
|
cipher_TLS_DH_DSS_WITH_CAMELLIA_128_GCM_SHA256,
|
||||||
|
cipher_TLS_DH_DSS_WITH_CAMELLIA_256_GCM_SHA384,
|
||||||
|
cipher_TLS_DH_anon_WITH_CAMELLIA_128_GCM_SHA256,
|
||||||
|
cipher_TLS_DH_anon_WITH_CAMELLIA_256_GCM_SHA384,
|
||||||
|
cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_GCM_SHA256,
|
||||||
|
cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_GCM_SHA384,
|
||||||
|
cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_GCM_SHA256,
|
||||||
|
cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_GCM_SHA384,
|
||||||
|
cipher_TLS_PSK_WITH_CAMELLIA_128_GCM_SHA256,
|
||||||
|
cipher_TLS_PSK_WITH_CAMELLIA_256_GCM_SHA384,
|
||||||
|
cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_GCM_SHA256,
|
||||||
|
cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_GCM_SHA384,
|
||||||
|
cipher_TLS_PSK_WITH_CAMELLIA_128_CBC_SHA256,
|
||||||
|
cipher_TLS_PSK_WITH_CAMELLIA_256_CBC_SHA384,
|
||||||
|
cipher_TLS_DHE_PSK_WITH_CAMELLIA_128_CBC_SHA256,
|
||||||
|
cipher_TLS_DHE_PSK_WITH_CAMELLIA_256_CBC_SHA384,
|
||||||
|
cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_CBC_SHA256,
|
||||||
|
cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_CBC_SHA384,
|
||||||
|
cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_128_CBC_SHA256,
|
||||||
|
cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_256_CBC_SHA384,
|
||||||
|
cipher_TLS_RSA_WITH_AES_128_CCM,
|
||||||
|
cipher_TLS_RSA_WITH_AES_256_CCM,
|
||||||
|
cipher_TLS_RSA_WITH_AES_128_CCM_8,
|
||||||
|
cipher_TLS_RSA_WITH_AES_256_CCM_8,
|
||||||
|
cipher_TLS_PSK_WITH_AES_128_CCM,
|
||||||
|
cipher_TLS_PSK_WITH_AES_256_CCM,
|
||||||
|
cipher_TLS_PSK_WITH_AES_128_CCM_8,
|
||||||
|
cipher_TLS_PSK_WITH_AES_256_CCM_8:
|
||||||
|
return true
|
||||||
|
default:
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
9 vendor/golang.org/x/net/http2/client_conn_pool.go generated vendored
@ -53,13 +53,13 @@ const (
 )
 
 func (p *clientConnPool) getClientConn(req *http.Request, addr string, dialOnMiss bool) (*ClientConn, error) {
-	if req.Close && dialOnMiss {
+	if isConnectionCloseRequest(req) && dialOnMiss {
 		// It gets its own connection.
-		cc, err := p.t.dialClientConn(addr)
+		const singleUse = true
+		cc, err := p.t.dialClientConn(addr, singleUse)
 		if err != nil {
 			return nil, err
 		}
-		cc.singleUse = true
 		return cc, nil
 	}
 	p.mu.Lock()
@ -104,7 +104,8 @@ func (p *clientConnPool) getStartDialLocked(addr string) *dialCall {
 
 // run in its own goroutine.
 func (c *dialCall) dial(addr string) {
-	c.res, c.err = c.p.t.dialClientConn(addr)
+	const singleUse = false // shared conn
+	c.res, c.err = c.p.t.dialClientConn(addr, singleUse)
 	close(c.done)
 
 	c.p.mu.Lock()
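The hunk above routes requests carrying Connection: close onto a dedicated, single-use HTTP/2 connection instead of the shared pooled one. A minimal sketch of triggering that path from the caller side, assuming client already uses an HTTP/2-enabled transport:

// fetchOnce is a sketch: marking the request Close asks the pool for a
// dedicated, single-use connection rather than the shared one.
func fetchOnce(client *http.Client, url string) error {
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		return err
	}
	req.Close = true // "Connection: close" -> own connection, torn down afterwards
	resp, err := client.Do(req)
	if err != nil {
		return err
	}
	return resp.Body.Close()
}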
2 vendor/golang.org/x/net/http2/configure_transport.go generated vendored
@ -56,7 +56,7 @@ func configureTransport(t1 *http.Transport) (*Transport, error) {
 }
 
 // registerHTTPSProtocol calls Transport.RegisterProtocol but
-// convering panics into errors.
+// converting panics into errors.
 func registerHTTPSProtocol(t *http.Transport, rt http.RoundTripper) (err error) {
 	defer func() {
 		if e := recover(); e != nil {
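registerHTTPSProtocol is the hook behind the exported http2.ConfigureTransport, which upgrades an existing net/http transport in place. A hedged usage sketch (TLS details elided):

// newHTTP2Client is a sketch; error handling and certificates are assumed.
func newHTTP2Client() (*http.Client, error) {
	t1 := &http.Transport{TLSClientConfig: &tls.Config{MinVersion: tls.VersionTLS12}}
	// ConfigureTransport registers an "https" round tripper on t1 that
	// speaks HTTP/2 when the server negotiates it via ALPN.
	if err := http2.ConfigureTransport(t1); err != nil {
		return nil, err
	}
	return &http.Client{Transport: t1}, nil
}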
146 vendor/golang.org/x/net/http2/databuffer.go generated vendored Normal file
@ -0,0 +1,146 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package http2

import (
	"errors"
	"fmt"
	"sync"
)

// Buffer chunks are allocated from a pool to reduce pressure on GC.
// The maximum wasted space per dataBuffer is 2x the largest size class,
// which happens when the dataBuffer has multiple chunks and there is
// one unread byte in both the first and last chunks. We use a few size
// classes to minimize overheads for servers that typically receive very
// small request bodies.
//
// TODO: Benchmark to determine if the pools are necessary. The GC may have
// improved enough that we can instead allocate chunks like this:
// make([]byte, max(16<<10, expectedBytesRemaining))
var (
	dataChunkSizeClasses = []int{
		1 << 10,
		2 << 10,
		4 << 10,
		8 << 10,
		16 << 10,
	}
	dataChunkPools = [...]sync.Pool{
		{New: func() interface{} { return make([]byte, 1<<10) }},
		{New: func() interface{} { return make([]byte, 2<<10) }},
		{New: func() interface{} { return make([]byte, 4<<10) }},
		{New: func() interface{} { return make([]byte, 8<<10) }},
		{New: func() interface{} { return make([]byte, 16<<10) }},
	}
)

func getDataBufferChunk(size int64) []byte {
	i := 0
	for ; i < len(dataChunkSizeClasses)-1; i++ {
		if size <= int64(dataChunkSizeClasses[i]) {
			break
		}
	}
	return dataChunkPools[i].Get().([]byte)
}

func putDataBufferChunk(p []byte) {
	for i, n := range dataChunkSizeClasses {
		if len(p) == n {
			dataChunkPools[i].Put(p)
			return
		}
	}
	panic(fmt.Sprintf("unexpected buffer len=%v", len(p)))
}

// dataBuffer is an io.ReadWriter backed by a list of data chunks.
// Each dataBuffer is used to read DATA frames on a single stream.
// The buffer is divided into chunks so the server can limit the
// total memory used by a single connection without limiting the
// request body size on any single stream.
type dataBuffer struct {
	chunks   [][]byte
	r        int   // next byte to read is chunks[0][r]
	w        int   // next byte to write is chunks[len(chunks)-1][w]
	size     int   // total buffered bytes
	expected int64 // we expect at least this many bytes in future Write calls (ignored if <= 0)
}

var errReadEmpty = errors.New("read from empty dataBuffer")

// Read copies bytes from the buffer into p.
// It is an error to read when no data is available.
func (b *dataBuffer) Read(p []byte) (int, error) {
	if b.size == 0 {
		return 0, errReadEmpty
	}
	var ntotal int
	for len(p) > 0 && b.size > 0 {
		readFrom := b.bytesFromFirstChunk()
		n := copy(p, readFrom)
		p = p[n:]
		ntotal += n
		b.r += n
		b.size -= n
		// If the first chunk has been consumed, advance to the next chunk.
		if b.r == len(b.chunks[0]) {
			putDataBufferChunk(b.chunks[0])
			end := len(b.chunks) - 1
			copy(b.chunks[:end], b.chunks[1:])
			b.chunks[end] = nil
			b.chunks = b.chunks[:end]
			b.r = 0
		}
	}
	return ntotal, nil
}

func (b *dataBuffer) bytesFromFirstChunk() []byte {
	if len(b.chunks) == 1 {
		return b.chunks[0][b.r:b.w]
	}
	return b.chunks[0][b.r:]
}

// Len returns the number of bytes of the unread portion of the buffer.
func (b *dataBuffer) Len() int {
	return b.size
}

// Write appends p to the buffer.
func (b *dataBuffer) Write(p []byte) (int, error) {
	ntotal := len(p)
	for len(p) > 0 {
		// If the last chunk is empty, allocate a new chunk. Try to allocate
		// enough to fully copy p plus any additional bytes we expect to
		// receive. However, this may allocate less than len(p).
		want := int64(len(p))
		if b.expected > want {
			want = b.expected
		}
		chunk := b.lastChunkOrAlloc(want)
		n := copy(chunk[b.w:], p)
		p = p[n:]
		b.w += n
		b.size += n
		b.expected -= int64(n)
	}
	return ntotal, nil
}

func (b *dataBuffer) lastChunkOrAlloc(want int64) []byte {
	if len(b.chunks) != 0 {
		last := b.chunks[len(b.chunks)-1]
		if b.w < len(last) {
			return last
		}
	}
	chunk := getDataBufferChunk(want)
	b.chunks = append(b.chunks, chunk)
	b.w = 0
	return chunk
}
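The size-class selection above is the core idea: pick the smallest pooled chunk that covers the expected write, capped at the largest class. A simplified, self-contained illustration of the same selection rule (not the vendored code itself):

package main

import "fmt"

var sizeClasses = []int{1 << 10, 2 << 10, 4 << 10, 8 << 10, 16 << 10}

// pickClass mirrors getDataBufferChunk's loop: the first class that fits,
// falling back to the largest class for oversized writes.
func pickClass(size int64) int {
	i := 0
	for ; i < len(sizeClasses)-1; i++ {
		if size <= int64(sizeClasses[i]) {
			break
		}
	}
	return sizeClasses[i]
}

func main() {
	fmt.Println(pickClass(100), pickClass(3000), pickClass(1<<20)) // 1024 4096 16384
}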
21 vendor/golang.org/x/net/http2/errors.go generated vendored
@ -64,9 +64,17 @@ func (e ConnectionError) Error() string { return fmt.Sprintf("connection error:
 type StreamError struct {
 	StreamID uint32
 	Code     ErrCode
+	Cause    error // optional additional detail
+}
+
+func streamError(id uint32, code ErrCode) StreamError {
+	return StreamError{StreamID: id, Code: code}
 }
 
 func (e StreamError) Error() string {
+	if e.Cause != nil {
+		return fmt.Sprintf("stream error: stream ID %d; %v; %v", e.StreamID, e.Code, e.Cause)
+	}
 	return fmt.Sprintf("stream error: stream ID %d; %v", e.StreamID, e.Code)
 }
 
@ -79,13 +87,16 @@ type goAwayFlowError struct{}
 
 func (goAwayFlowError) Error() string { return "connection exceeded flow control window size" }
 
-// connErrorReason wraps a ConnectionError with an informative error about why it occurs.
+// connError represents an HTTP/2 ConnectionError error code, along
+// with a string (for debugging) explaining why.
+//
 // Errors of this type are only returned by the frame parser functions
-// and converted into ConnectionError(ErrCodeProtocol).
+// and converted into ConnectionError(Code), after stashing away
+// the Reason into the Framer's errDetail field, accessible via
+// the (*Framer).ErrorDetail method.
 type connError struct {
-	Code   ErrCode
-	Reason string
+	Code   ErrCode // the ConnectionError error code
+	Reason string  // additional reason
 }
 
 func (e connError) Error() string {
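With the new Cause field, a stream error now carries the underlying reason when one is available. A short sketch of the two formatting paths in this vendored version (values chosen for illustration):

// demoStreamError is a sketch; ErrCodeProtocol stringifies as "PROTOCOL_ERROR".
func demoStreamError() {
	plain := http2.StreamError{StreamID: 3, Code: http2.ErrCodeProtocol}
	withCause := http2.StreamError{StreamID: 3, Code: http2.ErrCodeProtocol,
		Cause: errors.New("invalid header field value")}
	fmt.Println(plain)     // stream error: stream ID 3; PROTOCOL_ERROR
	fmt.Println(withCause) // stream error: stream ID 3; PROTOCOL_ERROR; invalid header field value
}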
60 vendor/golang.org/x/net/http2/fixed_buffer.go generated vendored
@ -1,60 +0,0 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package http2

import (
	"errors"
)

// fixedBuffer is an io.ReadWriter backed by a fixed size buffer.
// It never allocates, but moves old data as new data is written.
type fixedBuffer struct {
	buf  []byte
	r, w int
}

var (
	errReadEmpty = errors.New("read from empty fixedBuffer")
	errWriteFull = errors.New("write on full fixedBuffer")
)

// Read copies bytes from the buffer into p.
// It is an error to read when no data is available.
func (b *fixedBuffer) Read(p []byte) (n int, err error) {
	if b.r == b.w {
		return 0, errReadEmpty
	}
	n = copy(p, b.buf[b.r:b.w])
	b.r += n
	if b.r == b.w {
		b.r = 0
		b.w = 0
	}
	return n, nil
}

// Len returns the number of bytes of the unread portion of the buffer.
func (b *fixedBuffer) Len() int {
	return b.w - b.r
}

// Write copies bytes from p into the buffer.
// It is an error to write more data than the buffer can hold.
func (b *fixedBuffer) Write(p []byte) (n int, err error) {
	// Slide existing data to beginning.
	if b.r > 0 && len(p) > len(b.buf)-b.w {
		copy(b.buf, b.buf[b.r:b.w])
		b.w -= b.r
		b.r = 0
	}

	// Write new data.
	n = copy(b.buf[b.w:], p)
	b.w += n
	if n < len(p) {
		err = errWriteFull
	}
	return n, err
}
128
vendor/golang.org/x/net/http2/frame.go
generated
vendored
128
vendor/golang.org/x/net/http2/frame.go
generated
vendored
|
@ -122,7 +122,7 @@ var flagName = map[FrameType]map[Flags]string{
|
||||||
// a frameParser parses a frame given its FrameHeader and payload
|
// a frameParser parses a frame given its FrameHeader and payload
|
||||||
// bytes. The length of payload will always equal fh.Length (which
|
// bytes. The length of payload will always equal fh.Length (which
|
||||||
// might be 0).
|
// might be 0).
|
||||||
type frameParser func(fh FrameHeader, payload []byte) (Frame, error)
|
type frameParser func(fc *frameCache, fh FrameHeader, payload []byte) (Frame, error)
|
||||||
|
|
||||||
var frameParsers = map[FrameType]frameParser{
|
var frameParsers = map[FrameType]frameParser{
|
||||||
FrameData: parseDataFrame,
|
FrameData: parseDataFrame,
|
||||||
|
@ -317,10 +317,14 @@ type Framer struct {
|
||||||
// non-Continuation or Continuation on a different stream is
|
// non-Continuation or Continuation on a different stream is
|
||||||
// attempted to be written.
|
// attempted to be written.
|
||||||
|
|
||||||
logReads bool
|
logReads, logWrites bool
|
||||||
|
|
||||||
debugFramer *Framer // only use for logging written writes
|
debugFramer *Framer // only use for logging written writes
|
||||||
debugFramerBuf *bytes.Buffer
|
debugFramerBuf *bytes.Buffer
|
||||||
|
debugReadLoggerf func(string, ...interface{})
|
||||||
|
debugWriteLoggerf func(string, ...interface{})
|
||||||
|
|
||||||
|
frameCache *frameCache // nil if frames aren't reused (default)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (fr *Framer) maxHeaderListSize() uint32 {
|
func (fr *Framer) maxHeaderListSize() uint32 {
|
||||||
|
@ -355,7 +359,7 @@ func (f *Framer) endWrite() error {
|
||||||
byte(length>>16),
|
byte(length>>16),
|
||||||
byte(length>>8),
|
byte(length>>8),
|
||||||
byte(length))
|
byte(length))
|
||||||
if logFrameWrites {
|
if f.logWrites {
|
||||||
f.logWrite()
|
f.logWrite()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -378,10 +382,10 @@ func (f *Framer) logWrite() {
|
||||||
f.debugFramerBuf.Write(f.wbuf)
|
f.debugFramerBuf.Write(f.wbuf)
|
||||||
fr, err := f.debugFramer.ReadFrame()
|
fr, err := f.debugFramer.ReadFrame()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Printf("http2: Framer %p: failed to decode just-written frame", f)
|
f.debugWriteLoggerf("http2: Framer %p: failed to decode just-written frame", f)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
log.Printf("http2: Framer %p: wrote %v", f, summarizeFrame(fr))
|
f.debugWriteLoggerf("http2: Framer %p: wrote %v", f, summarizeFrame(fr))
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *Framer) writeByte(v byte) { f.wbuf = append(f.wbuf, v) }
|
func (f *Framer) writeByte(v byte) { f.wbuf = append(f.wbuf, v) }
|
||||||
|
@ -396,12 +400,36 @@ const (
 	maxFrameSize = 1<<24 - 1
 )
 
+// SetReuseFrames allows the Framer to reuse Frames.
+// If called on a Framer, Frames returned by calls to ReadFrame are only
+// valid until the next call to ReadFrame.
+func (fr *Framer) SetReuseFrames() {
+	if fr.frameCache != nil {
+		return
+	}
+	fr.frameCache = &frameCache{}
+}
+
+type frameCache struct {
+	dataFrame DataFrame
+}
+
+func (fc *frameCache) getDataFrame() *DataFrame {
+	if fc == nil {
+		return &DataFrame{}
+	}
+	return &fc.dataFrame
+}
+
 // NewFramer returns a Framer that writes frames to w and reads them from r.
 func NewFramer(w io.Writer, r io.Reader) *Framer {
 	fr := &Framer{
 		w:                 w,
 		r:                 r,
 		logReads:          logFrameReads,
+		logWrites:         logFrameWrites,
+		debugReadLoggerf:  log.Printf,
+		debugWriteLoggerf: log.Printf,
 	}
 	fr.getReadBuf = func(size uint32) []byte {
 		if cap(fr.readBuf) >= int(size) {
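SetReuseFrames trades allocation for aliasing: frames returned by ReadFrame may be overwritten by the next read, so anything kept must be copied. A hedged sketch of a read loop, assuming fr already sits past the connection preface and SETTINGS exchange:

// readBody drains DATA frames from fr into a private copy of the payload.
func readBody(fr *http2.Framer) ([]byte, error) {
	fr.SetReuseFrames() // returned frames are only valid until the next ReadFrame
	var body []byte
	for {
		f, err := fr.ReadFrame()
		if err != nil {
			return body, err
		}
		df, ok := f.(*http2.DataFrame)
		if !ok {
			continue
		}
		// Data() aliases the Framer's internal buffer, so copy it out.
		body = append(body, df.Data()...)
		if df.StreamEnded() {
			return body, nil
		}
	}
}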
@ -472,7 +500,7 @@ func (fr *Framer) ReadFrame() (Frame, error) {
|
||||||
if _, err := io.ReadFull(fr.r, payload); err != nil {
|
if _, err := io.ReadFull(fr.r, payload); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
f, err := typeFrameParser(fh.Type)(fh, payload)
|
f, err := typeFrameParser(fh.Type)(fr.frameCache, fh, payload)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if ce, ok := err.(connError); ok {
|
if ce, ok := err.(connError); ok {
|
||||||
return nil, fr.connError(ce.Code, ce.Reason)
|
return nil, fr.connError(ce.Code, ce.Reason)
|
||||||
|
@ -483,7 +511,7 @@ func (fr *Framer) ReadFrame() (Frame, error) {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
if fr.logReads {
|
if fr.logReads {
|
||||||
log.Printf("http2: Framer %p: read %v", fr, summarizeFrame(f))
|
fr.debugReadLoggerf("http2: Framer %p: read %v", fr, summarizeFrame(f))
|
||||||
}
|
}
|
||||||
if fh.Type == FrameHeaders && fr.ReadMetaHeaders != nil {
|
if fh.Type == FrameHeaders && fr.ReadMetaHeaders != nil {
|
||||||
return fr.readMetaFrame(f.(*HeadersFrame))
|
return fr.readMetaFrame(f.(*HeadersFrame))
|
||||||
|
@ -560,7 +588,7 @@ func (f *DataFrame) Data() []byte {
 	return f.data
 }
 
-func parseDataFrame(fh FrameHeader, payload []byte) (Frame, error) {
+func parseDataFrame(fc *frameCache, fh FrameHeader, payload []byte) (Frame, error) {
 	if fh.StreamID == 0 {
 		// DATA frames MUST be associated with a stream. If a
 		// DATA frame is received whose stream identifier
@ -569,9 +597,9 @@ func parseDataFrame(fh FrameHeader, payload []byte) (Frame, error) {
 		// PROTOCOL_ERROR.
 		return nil, connError{ErrCodeProtocol, "DATA frame with stream ID 0"}
 	}
-	f := &DataFrame{
-		FrameHeader: fh,
-	}
+	f := fc.getDataFrame()
+	f.FrameHeader = fh
 	var padSize byte
 	if fh.Flags.Has(FlagDataPadded) {
 		var err error
@ -594,6 +622,8 @@ func parseDataFrame(fh FrameHeader, payload []byte) (Frame, error) {
 var (
 	errStreamID    = errors.New("invalid stream ID")
 	errDepStreamID = errors.New("invalid dependent stream ID")
+	errPadLength   = errors.New("pad length too large")
+	errPadBytes    = errors.New("padding bytes must all be zeros unless AllowIllegalWrites is enabled")
 )
 
 func validStreamIDOrZero(streamID uint32) bool {
@ -607,18 +637,51 @@ func validStreamID(streamID uint32) bool {
 // WriteData writes a DATA frame.
 //
 // It will perform exactly one Write to the underlying Writer.
-// It is the caller's responsibility to not call other Write methods concurrently.
+// It is the caller's responsibility not to violate the maximum frame size
+// and to not call other Write methods concurrently.
 func (f *Framer) WriteData(streamID uint32, endStream bool, data []byte) error {
-	// TODO: ignoring padding for now. will add when somebody cares.
+	return f.WriteDataPadded(streamID, endStream, data, nil)
+}
+
+// WriteData writes a DATA frame with optional padding.
+//
+// If pad is nil, the padding bit is not sent.
+// The length of pad must not exceed 255 bytes.
+// The bytes of pad must all be zero, unless f.AllowIllegalWrites is set.
+//
+// It will perform exactly one Write to the underlying Writer.
+// It is the caller's responsibility not to violate the maximum frame size
+// and to not call other Write methods concurrently.
+func (f *Framer) WriteDataPadded(streamID uint32, endStream bool, data, pad []byte) error {
 	if !validStreamID(streamID) && !f.AllowIllegalWrites {
 		return errStreamID
 	}
+	if len(pad) > 0 {
+		if len(pad) > 255 {
+			return errPadLength
+		}
+		if !f.AllowIllegalWrites {
+			for _, b := range pad {
+				if b != 0 {
+					// "Padding octets MUST be set to zero when sending."
+					return errPadBytes
+				}
+			}
+		}
+	}
 	var flags Flags
 	if endStream {
 		flags |= FlagDataEndStream
 	}
+	if pad != nil {
+		flags |= FlagDataPadded
+	}
 	f.startWrite(FrameData, flags, streamID)
+	if pad != nil {
+		f.wbuf = append(f.wbuf, byte(len(pad)))
+	}
 	f.wbuf = append(f.wbuf, data...)
+	f.wbuf = append(f.wbuf, pad...)
 	return f.endWrite()
 }
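WriteDataPadded is the new exported entry point; WriteData is now a thin wrapper that passes nil padding. A short sketch, with stream IDs and payload chosen for illustration and fr assumed to wrap an established connection:

// writeBody is a sketch of the two DATA-frame variants.
func writeBody(fr *http2.Framer, streamID uint32, body []byte) error {
	// Plain DATA frame, stream left open.
	if err := fr.WriteData(streamID, false, body); err != nil {
		return err
	}
	// Same payload with 8 bytes of zero padding and END_STREAM set.
	// pad must be <= 255 bytes and all zero unless AllowIllegalWrites is set.
	return fr.WriteDataPadded(streamID, true, body, make([]byte, 8))
}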
@ -632,7 +695,7 @@ type SettingsFrame struct {
|
||||||
p []byte
|
p []byte
|
||||||
}
|
}
|
||||||
|
|
||||||
func parseSettingsFrame(fh FrameHeader, p []byte) (Frame, error) {
|
func parseSettingsFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) {
|
||||||
if fh.Flags.Has(FlagSettingsAck) && fh.Length > 0 {
|
if fh.Flags.Has(FlagSettingsAck) && fh.Length > 0 {
|
||||||
// When this (ACK 0x1) bit is set, the payload of the
|
// When this (ACK 0x1) bit is set, the payload of the
|
||||||
// SETTINGS frame MUST be empty. Receipt of a
|
// SETTINGS frame MUST be empty. Receipt of a
|
||||||
|
@ -714,7 +777,7 @@ func (f *Framer) WriteSettings(settings ...Setting) error {
|
||||||
return f.endWrite()
|
return f.endWrite()
|
||||||
}
|
}
|
||||||
|
|
||||||
// WriteSettings writes an empty SETTINGS frame with the ACK bit set.
|
// WriteSettingsAck writes an empty SETTINGS frame with the ACK bit set.
|
||||||
//
|
//
|
||||||
// It will perform exactly one Write to the underlying Writer.
|
// It will perform exactly one Write to the underlying Writer.
|
||||||
// It is the caller's responsibility to not call other Write methods concurrently.
|
// It is the caller's responsibility to not call other Write methods concurrently.
|
||||||
|
@ -734,7 +797,7 @@ type PingFrame struct {
|
||||||
|
|
||||||
func (f *PingFrame) IsAck() bool { return f.Flags.Has(FlagPingAck) }
|
func (f *PingFrame) IsAck() bool { return f.Flags.Has(FlagPingAck) }
|
||||||
|
|
||||||
func parsePingFrame(fh FrameHeader, payload []byte) (Frame, error) {
|
func parsePingFrame(_ *frameCache, fh FrameHeader, payload []byte) (Frame, error) {
|
||||||
if len(payload) != 8 {
|
if len(payload) != 8 {
|
||||||
return nil, ConnectionError(ErrCodeFrameSize)
|
return nil, ConnectionError(ErrCodeFrameSize)
|
||||||
}
|
}
|
||||||
|
@ -774,7 +837,7 @@ func (f *GoAwayFrame) DebugData() []byte {
|
||||||
return f.debugData
|
return f.debugData
|
||||||
}
|
}
|
||||||
|
|
||||||
func parseGoAwayFrame(fh FrameHeader, p []byte) (Frame, error) {
|
func parseGoAwayFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) {
|
||||||
if fh.StreamID != 0 {
|
if fh.StreamID != 0 {
|
||||||
return nil, ConnectionError(ErrCodeProtocol)
|
return nil, ConnectionError(ErrCodeProtocol)
|
||||||
}
|
}
|
||||||
|
@ -814,7 +877,7 @@ func (f *UnknownFrame) Payload() []byte {
|
||||||
return f.p
|
return f.p
|
||||||
}
|
}
|
||||||
|
|
||||||
func parseUnknownFrame(fh FrameHeader, p []byte) (Frame, error) {
|
func parseUnknownFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) {
|
||||||
return &UnknownFrame{fh, p}, nil
|
return &UnknownFrame{fh, p}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -825,7 +888,7 @@ type WindowUpdateFrame struct {
|
||||||
Increment uint32 // never read with high bit set
|
Increment uint32 // never read with high bit set
|
||||||
}
|
}
|
||||||
|
|
||||||
func parseWindowUpdateFrame(fh FrameHeader, p []byte) (Frame, error) {
|
func parseWindowUpdateFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) {
|
||||||
if len(p) != 4 {
|
if len(p) != 4 {
|
||||||
return nil, ConnectionError(ErrCodeFrameSize)
|
return nil, ConnectionError(ErrCodeFrameSize)
|
||||||
}
|
}
|
||||||
|
@ -840,7 +903,7 @@ func parseWindowUpdateFrame(fh FrameHeader, p []byte) (Frame, error) {
|
||||||
if fh.StreamID == 0 {
|
if fh.StreamID == 0 {
|
||||||
return nil, ConnectionError(ErrCodeProtocol)
|
return nil, ConnectionError(ErrCodeProtocol)
|
||||||
}
|
}
|
||||||
return nil, StreamError{fh.StreamID, ErrCodeProtocol}
|
return nil, streamError(fh.StreamID, ErrCodeProtocol)
|
||||||
}
|
}
|
||||||
return &WindowUpdateFrame{
|
return &WindowUpdateFrame{
|
||||||
FrameHeader: fh,
|
FrameHeader: fh,
|
||||||
|
@ -890,7 +953,7 @@ func (f *HeadersFrame) HasPriority() bool {
|
||||||
return f.FrameHeader.Flags.Has(FlagHeadersPriority)
|
return f.FrameHeader.Flags.Has(FlagHeadersPriority)
|
||||||
}
|
}
|
||||||
|
|
||||||
func parseHeadersFrame(fh FrameHeader, p []byte) (_ Frame, err error) {
|
func parseHeadersFrame(_ *frameCache, fh FrameHeader, p []byte) (_ Frame, err error) {
|
||||||
hf := &HeadersFrame{
|
hf := &HeadersFrame{
|
||||||
FrameHeader: fh,
|
FrameHeader: fh,
|
||||||
}
|
}
|
||||||
|
@ -921,7 +984,7 @@ func parseHeadersFrame(fh FrameHeader, p []byte) (_ Frame, err error) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if len(p)-int(padLength) <= 0 {
|
if len(p)-int(padLength) <= 0 {
|
||||||
return nil, StreamError{fh.StreamID, ErrCodeProtocol}
|
return nil, streamError(fh.StreamID, ErrCodeProtocol)
|
||||||
}
|
}
|
||||||
hf.headerFragBuf = p[:len(p)-int(padLength)]
|
hf.headerFragBuf = p[:len(p)-int(padLength)]
|
||||||
return hf, nil
|
return hf, nil
|
||||||
|
@ -1027,7 +1090,7 @@ func (p PriorityParam) IsZero() bool {
|
||||||
return p == PriorityParam{}
|
return p == PriorityParam{}
|
||||||
}
|
}
|
||||||
|
|
||||||
func parsePriorityFrame(fh FrameHeader, payload []byte) (Frame, error) {
|
func parsePriorityFrame(_ *frameCache, fh FrameHeader, payload []byte) (Frame, error) {
|
||||||
if fh.StreamID == 0 {
|
if fh.StreamID == 0 {
|
||||||
return nil, connError{ErrCodeProtocol, "PRIORITY frame with stream ID 0"}
|
return nil, connError{ErrCodeProtocol, "PRIORITY frame with stream ID 0"}
|
||||||
}
|
}
|
||||||
|
@ -1074,7 +1137,7 @@ type RSTStreamFrame struct {
|
||||||
ErrCode ErrCode
|
ErrCode ErrCode
|
||||||
}
|
}
|
||||||
|
|
||||||
func parseRSTStreamFrame(fh FrameHeader, p []byte) (Frame, error) {
|
func parseRSTStreamFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) {
|
||||||
if len(p) != 4 {
|
if len(p) != 4 {
|
||||||
return nil, ConnectionError(ErrCodeFrameSize)
|
return nil, ConnectionError(ErrCodeFrameSize)
|
||||||
}
|
}
|
||||||
|
@ -1104,7 +1167,7 @@ type ContinuationFrame struct {
|
||||||
headerFragBuf []byte
|
headerFragBuf []byte
|
||||||
}
|
}
|
||||||
|
|
||||||
func parseContinuationFrame(fh FrameHeader, p []byte) (Frame, error) {
|
func parseContinuationFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) {
|
||||||
if fh.StreamID == 0 {
|
if fh.StreamID == 0 {
|
||||||
return nil, connError{ErrCodeProtocol, "CONTINUATION frame with stream ID 0"}
|
return nil, connError{ErrCodeProtocol, "CONTINUATION frame with stream ID 0"}
|
||||||
}
|
}
|
||||||
|
@ -1154,7 +1217,7 @@ func (f *PushPromiseFrame) HeadersEnded() bool {
|
||||||
return f.FrameHeader.Flags.Has(FlagPushPromiseEndHeaders)
|
return f.FrameHeader.Flags.Has(FlagPushPromiseEndHeaders)
|
||||||
}
|
}
|
||||||
|
|
||||||
func parsePushPromise(fh FrameHeader, p []byte) (_ Frame, err error) {
|
func parsePushPromise(_ *frameCache, fh FrameHeader, p []byte) (_ Frame, err error) {
|
||||||
pp := &PushPromiseFrame{
|
pp := &PushPromiseFrame{
|
||||||
FrameHeader: fh,
|
FrameHeader: fh,
|
||||||
}
|
}
|
||||||
|
@ -1396,6 +1459,9 @@ func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) {
|
||||||
hdec.SetEmitEnabled(true)
|
hdec.SetEmitEnabled(true)
|
||||||
hdec.SetMaxStringLength(fr.maxHeaderStringLen())
|
hdec.SetMaxStringLength(fr.maxHeaderStringLen())
|
||||||
hdec.SetEmitFunc(func(hf hpack.HeaderField) {
|
hdec.SetEmitFunc(func(hf hpack.HeaderField) {
|
||||||
|
if VerboseLogs && fr.logReads {
|
||||||
|
fr.debugReadLoggerf("http2: decoded hpack field %+v", hf)
|
||||||
|
}
|
||||||
if !httplex.ValidHeaderFieldValue(hf.Value) {
|
if !httplex.ValidHeaderFieldValue(hf.Value) {
|
||||||
invalid = headerFieldValueError(hf.Value)
|
invalid = headerFieldValueError(hf.Value)
|
||||||
}
|
}
|
||||||
|
@ -1454,11 +1520,17 @@ func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) {
|
||||||
}
|
}
|
||||||
if invalid != nil {
|
if invalid != nil {
|
||||||
fr.errDetail = invalid
|
fr.errDetail = invalid
|
||||||
return nil, StreamError{mh.StreamID, ErrCodeProtocol}
|
if VerboseLogs {
|
||||||
|
log.Printf("http2: invalid header: %v", invalid)
|
||||||
|
}
|
||||||
|
return nil, StreamError{mh.StreamID, ErrCodeProtocol, invalid}
|
||||||
}
|
}
|
||||||
if err := mh.checkPseudos(); err != nil {
|
if err := mh.checkPseudos(); err != nil {
|
||||||
fr.errDetail = err
|
fr.errDetail = err
|
||||||
return nil, StreamError{mh.StreamID, ErrCodeProtocol}
|
if VerboseLogs {
|
||||||
|
log.Printf("http2: invalid pseudo headers: %v", err)
|
||||||
|
}
|
||||||
|
return nil, StreamError{mh.StreamID, ErrCodeProtocol, err}
|
||||||
}
|
}
|
||||||
return mh, nil
|
return mh, nil
|
||||||
}
|
}
|
||||||
|
|
27 vendor/golang.org/x/net/http2/go16.go generated vendored
@ -7,7 +7,6 @@
 package http2
 
 import (
-	"crypto/tls"
 	"net/http"
 	"time"
 )
@ -15,29 +14,3 @@ import (
 func transportExpectContinueTimeout(t1 *http.Transport) time.Duration {
 	return t1.ExpectContinueTimeout
 }
-
-// isBadCipher reports whether the cipher is blacklisted by the HTTP/2 spec.
-func isBadCipher(cipher uint16) bool {
-	switch cipher {
-	case tls.TLS_RSA_WITH_RC4_128_SHA,
-		tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA,
-		tls.TLS_RSA_WITH_AES_128_CBC_SHA,
-		tls.TLS_RSA_WITH_AES_256_CBC_SHA,
-		tls.TLS_RSA_WITH_AES_128_GCM_SHA256,
-		tls.TLS_RSA_WITH_AES_256_GCM_SHA384,
-		tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA,
-		tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
-		tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
-		tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA,
-		tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,
-		tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
-		tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA:
-		// Reject cipher suites from Appendix A.
-		// "This list includes those cipher suites that do not
-		// offer an ephemeral key exchange and those that are
-		// based on the TLS null, stream or block cipher type"
-		return true
-	default:
-		return false
-	}
-}
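The Go 1.6 fallback above only blacklisted the handful of suites crypto/tls exposes; the generated ciphers.go earlier in this diff replaces it with the full Appendix A registry. On the server side, the practical counterpart is to offer only HTTP/2-approved suites up front, e.g. this hedged sketch:

// http2TLSConfig is a sketch of a server-side config that stays off the
// RFC 7540 Appendix A blacklist.
func http2TLSConfig() *tls.Config {
	return &tls.Config{
		MinVersion: tls.VersionTLS12,
		CipherSuites: []uint16{
			// RFC 7540 requires support for this ECDHE/AES-GCM suite;
			// both entries offer ephemeral key exchange with an AEAD cipher.
			tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
			tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
		},
		PreferServerCipherSuites: true,
	}
}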
12 vendor/golang.org/x/net/http2/go17.go generated vendored
@ -39,6 +39,13 @@ type clientTrace httptrace.ClientTrace
 
 func reqContext(r *http.Request) context.Context { return r.Context() }
 
+func (t *Transport) idleConnTimeout() time.Duration {
+	if t.t1 != nil {
+		return t.t1.IdleConnTimeout
+	}
+	return 0
+}
+
 func setResponseUncompressed(res *http.Response) { res.Uncompressed = true }
 
 func traceGotConn(req *http.Request, cc *ClientConn) {
@ -92,3 +99,8 @@ func requestTrace(req *http.Request) *clientTrace {
 	trace := httptrace.ContextClientTrace(req.Context())
 	return (*clientTrace)(trace)
 }
+
+// Ping sends a PING frame to the server and waits for the ack.
+func (cc *ClientConn) Ping(ctx context.Context) error {
+	return cc.ping(ctx)
+}
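Ping is the new exported liveness check on a ClientConn, wrapping the internal ping with a caller-supplied context. A hedged sketch, assuming cc is a *http2.ClientConn obtained elsewhere (the transport normally manages these internally):

// checkConn is a sketch of a health probe with a deadline.
func checkConn(cc *http2.ClientConn) error {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	return cc.Ping(ctx) // sends a PING frame and waits for the ack
}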
36 vendor/golang.org/x/net/http2/go17_not18.go generated vendored Normal file
@ -0,0 +1,36 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build go1.7,!go1.8

package http2

import "crypto/tls"

// temporary copy of Go 1.7's private tls.Config.clone:
func cloneTLSConfig(c *tls.Config) *tls.Config {
	return &tls.Config{
		Rand:                        c.Rand,
		Time:                        c.Time,
		Certificates:                c.Certificates,
		NameToCertificate:           c.NameToCertificate,
		GetCertificate:              c.GetCertificate,
		RootCAs:                     c.RootCAs,
		NextProtos:                  c.NextProtos,
		ServerName:                  c.ServerName,
		ClientAuth:                  c.ClientAuth,
		ClientCAs:                   c.ClientCAs,
		InsecureSkipVerify:          c.InsecureSkipVerify,
		CipherSuites:                c.CipherSuites,
		PreferServerCipherSuites:    c.PreferServerCipherSuites,
		SessionTicketsDisabled:      c.SessionTicketsDisabled,
		SessionTicketKey:            c.SessionTicketKey,
		ClientSessionCache:          c.ClientSessionCache,
		MinVersion:                  c.MinVersion,
		MaxVersion:                  c.MaxVersion,
		CurvePreferences:            c.CurvePreferences,
		DynamicRecordSizingDisabled: c.DynamicRecordSizingDisabled,
		Renegotiation:               c.Renegotiation,
	}
}
56 vendor/golang.org/x/net/http2/go18.go generated vendored Normal file
@ -0,0 +1,56 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build go1.8

package http2

import (
	"crypto/tls"
	"io"
	"net/http"
)

func cloneTLSConfig(c *tls.Config) *tls.Config {
	c2 := c.Clone()
	c2.GetClientCertificate = c.GetClientCertificate // golang.org/issue/19264
	return c2
}

var _ http.Pusher = (*responseWriter)(nil)

// Push implements http.Pusher.
func (w *responseWriter) Push(target string, opts *http.PushOptions) error {
	internalOpts := pushOptions{}
	if opts != nil {
		internalOpts.Method = opts.Method
		internalOpts.Header = opts.Header
	}
	return w.push(target, internalOpts)
}

func configureServer18(h1 *http.Server, h2 *Server) error {
	if h2.IdleTimeout == 0 {
		if h1.IdleTimeout != 0 {
			h2.IdleTimeout = h1.IdleTimeout
		} else {
			h2.IdleTimeout = h1.ReadTimeout
		}
	}
	return nil
}

func shouldLogPanic(panicValue interface{}) bool {
	return panicValue != nil && panicValue != http.ErrAbortHandler
}

func reqGetBody(req *http.Request) func() (io.ReadCloser, error) {
	return req.GetBody
}

func reqBodyIsNoBody(body io.ReadCloser) bool {
	return body == http.NoBody
}

func go18httpNoBody() io.ReadCloser { return http.NoBody } // for tests only
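responseWriter now satisfies http.Pusher on Go 1.8+, so handlers can initiate server push through the standard interface. A sketch of a handler using it (the asset path is illustrative):

// pushCSS pushes a stylesheet before writing the page body.
func pushCSS(w http.ResponseWriter, r *http.Request) {
	if pusher, ok := w.(http.Pusher); ok {
		// Best effort: the push is simply unavailable over HTTP/1.x
		// or when the client has disabled pushes.
		_ = pusher.Push("/static/app.css", nil)
	}
	io.WriteString(w, `<link rel="stylesheet" href="/static/app.css">`)
}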
16 vendor/golang.org/x/net/http2/go19.go generated vendored Normal file
@ -0,0 +1,16 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build go1.9

package http2

import (
	"net/http"
)

func configureServer19(s *http.Server, conf *Server) error {
	s.RegisterOnShutdown(conf.state.startGracefulShutdown)
	return nil
}
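configureServer19 hooks the HTTP/2 server's graceful shutdown into net/http's RegisterOnShutdown (Go 1.9), so http.Server.Shutdown also drains HTTP/2 connections. A hedged sketch of the caller side, with certificate paths and signal handling assumed:

// serveAndShutdown is a sketch; real code would wait on a shutdown signal.
func serveAndShutdown(srv *http.Server) error {
	go srv.ListenAndServeTLS("cert.pem", "key.pem")
	// ... later, on shutdown:
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	// Shutdown runs hooks registered via RegisterOnShutdown, which now
	// includes the HTTP/2 server's graceful shutdown path.
	return srv.Shutdown(ctx)
}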
27
vendor/golang.org/x/net/http2/hpack/encode.go
generated
vendored
27
vendor/golang.org/x/net/http2/hpack/encode.go
generated
vendored
|
@ -39,6 +39,7 @@ func NewEncoder(w io.Writer) *Encoder {
|
||||||
tableSizeUpdate: false,
|
tableSizeUpdate: false,
|
||||||
w: w,
|
w: w,
|
||||||
}
|
}
|
||||||
|
e.dynTab.table.init()
|
||||||
e.dynTab.setMaxSize(initialHeaderTableSize)
|
e.dynTab.setMaxSize(initialHeaderTableSize)
|
||||||
return e
|
return e
|
||||||
}
|
}
|
||||||
|
@@ -88,29 +89,17 @@ func (e *Encoder) WriteField(f HeaderField) error {
 // only name matches, i points to that index and nameValueMatch
 // becomes false.
 func (e *Encoder) searchTable(f HeaderField) (i uint64, nameValueMatch bool) {
-    for idx, hf := range staticTable {
-        if !constantTimeStringCompare(hf.Name, f.Name) {
-            continue
-        }
-        if i == 0 {
-            i = uint64(idx + 1)
-        }
-        if f.Sensitive {
-            continue
-        }
-        if !constantTimeStringCompare(hf.Value, f.Value) {
-            continue
-        }
-        i = uint64(idx + 1)
-        nameValueMatch = true
-        return
+    i, nameValueMatch = staticTable.search(f)
+    if nameValueMatch {
+        return i, true
     }
 
-    j, nameValueMatch := e.dynTab.search(f)
+    j, nameValueMatch := e.dynTab.table.search(f)
     if nameValueMatch || (i == 0 && j != 0) {
-        i = j + uint64(len(staticTable))
+        return j + uint64(staticTable.len()), nameValueMatch
     }
-    return
+
+    return i, false
 }
 
 // SetMaxDynamicTableSize changes the dynamic header table size to v.
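The rewritten searchTable above now defers all matching to the shared header field table and only decides what to do with the result. For orientation, here is a standalone sketch of how an HPACK encoder typically turns such a (index, nameValueMatch) pair into a representation choice; the function, its labels, and the example indices are illustrative only and are not part of the vendored code.

package main

import "fmt"

// chooseRepresentation outlines the usual decision an HPACK encoder makes
// from a table-search result (RFC 7541, Section 6). It is a sketch, not the
// encoder's actual logic.
func chooseRepresentation(index uint64, nameValueMatch, sensitive bool) string {
    switch {
    case nameValueMatch && !sensitive:
        return "indexed header field" // one index covers name and value
    case index > 0 && sensitive:
        return "literal, never indexed, with indexed name"
    case index > 0:
        return "literal with indexed name" // reuse the name index, send the value
    default:
        return "literal with literal name"
    }
}

func main() {
    fmt.Println(chooseRepresentation(2, true, false))  // e.g. a full ":method: GET" match
    fmt.Println(chooseRepresentation(38, false, false)) // e.g. a name-only "host" match
}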
vendor/golang.org/x/net/http2/hpack/hpack.go (generated, vendored; 102 changed lines)
@@ -57,7 +57,7 @@ func (hf HeaderField) String() string {
     return fmt.Sprintf("header field %q = %q%s", hf.Name, hf.Value, suffix)
 }
 
-// Size returns the size of an entry per RFC 7540 section 5.2.
+// Size returns the size of an entry per RFC 7541 section 4.1.
 func (hf HeaderField) Size() uint32 {
     // http://http2.github.io/http2-spec/compression.html#rfc.section.4.1
     // "The size of the dynamic table is the sum of the size of
@@ -102,6 +102,7 @@ func NewDecoder(maxDynamicTableSize uint32, emitFunc func(f HeaderField)) *Decod
         emit:        emitFunc,
         emitEnabled: true,
     }
+    d.dynTab.table.init()
     d.dynTab.allowedMaxSize = maxDynamicTableSize
     d.dynTab.setMaxSize(maxDynamicTableSize)
     return d
@@ -154,12 +155,9 @@ func (d *Decoder) SetAllowedMaxDynamicTableSize(v uint32) {
 }
 
 type dynamicTable struct {
-    // ents is the FIFO described at
     // http://http2.github.io/http2-spec/compression.html#rfc.section.2.3.2
-    // The newest (low index) is append at the end, and items are
-    // evicted from the front.
-    ents           []HeaderField
-    size           uint32
+    table          headerFieldTable
+    size           uint32 // in bytes
     maxSize        uint32 // current maxSize
     allowedMaxSize uint32 // maxSize may go up to this, inclusive
 }
@@ -169,95 +167,45 @@ func (dt *dynamicTable) setMaxSize(v uint32) {
     dt.evict()
 }
 
-// TODO: change dynamicTable to be a struct with a slice and a size int field,
-// per http://http2.github.io/http2-spec/compression.html#rfc.section.4.1:
-//
-//
-// Then make add increment the size. maybe the max size should move from Decoder to
-// dynamicTable and add should return an ok bool if there was enough space.
-//
-// Later we'll need a remove operation on dynamicTable.
-
 func (dt *dynamicTable) add(f HeaderField) {
-    dt.ents = append(dt.ents, f)
+    dt.table.addEntry(f)
     dt.size += f.Size()
     dt.evict()
 }
 
-// If we're too big, evict old stuff (front of the slice)
+// If we're too big, evict old stuff.
 func (dt *dynamicTable) evict() {
-    base := dt.ents // keep base pointer of slice
-    for dt.size > dt.maxSize {
-        dt.size -= dt.ents[0].Size()
-        dt.ents = dt.ents[1:]
+    var n int
+    for dt.size > dt.maxSize && n < dt.table.len() {
+        dt.size -= dt.table.ents[n].Size()
+        n++
     }
-
-    // Shift slice contents down if we evicted things.
-    if len(dt.ents) != len(base) {
-        copy(base, dt.ents)
-        dt.ents = base[:len(dt.ents)]
-    }
+    dt.table.evictOldest(n)
 }
 
-// constantTimeStringCompare compares string a and b in a constant
-// time manner.
-func constantTimeStringCompare(a, b string) bool {
-    if len(a) != len(b) {
-        return false
-    }
-
-    c := byte(0)
-
-    for i := 0; i < len(a); i++ {
-        c |= a[i] ^ b[i]
-    }
-
-    return c == 0
-}
-
-// Search searches f in the table. The return value i is 0 if there is
-// no name match. If there is name match or name/value match, i is the
-// index of that entry (1-based). If both name and value match,
-// nameValueMatch becomes true.
-func (dt *dynamicTable) search(f HeaderField) (i uint64, nameValueMatch bool) {
-    l := len(dt.ents)
-    for j := l - 1; j >= 0; j-- {
-        ent := dt.ents[j]
-        if !constantTimeStringCompare(ent.Name, f.Name) {
-            continue
-        }
-        if i == 0 {
-            i = uint64(l - j)
-        }
-        if f.Sensitive {
-            continue
-        }
-        if !constantTimeStringCompare(ent.Value, f.Value) {
-            continue
-        }
-        i = uint64(l - j)
-        nameValueMatch = true
-        return
-    }
-    return
-}
-
 func (d *Decoder) maxTableIndex() int {
-    return len(d.dynTab.ents) + len(staticTable)
+    // This should never overflow. RFC 7540 Section 6.5.2 limits the size of
+    // the dynamic table to 2^32 bytes, where each entry will occupy more than
+    // one byte. Further, the staticTable has a fixed, small length.
+    return d.dynTab.table.len() + staticTable.len()
 }
 
 func (d *Decoder) at(i uint64) (hf HeaderField, ok bool) {
-    if i < 1 {
+    // See Section 2.3.3.
+    if i == 0 {
         return
     }
+    if i <= uint64(staticTable.len()) {
+        return staticTable.ents[i-1], true
+    }
     if i > uint64(d.maxTableIndex()) {
         return
     }
-    if i <= uint64(len(staticTable)) {
-        return staticTable[i-1], true
-    }
-    dents := d.dynTab.ents
-    return dents[len(dents)-(int(i)-len(staticTable))], true
+    // In the dynamic table, newer entries have lower indices.
+    // However, dt.ents[0] is the oldest entry. Hence, dt.ents is
+    // the reversed dynamic table.
+    dt := d.dynTab.table
+    return dt.ents[dt.len()-(int(i)-staticTable.len())], true
 }
 
 // Decode decodes an entire block.
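The new Decoder.at above resolves a 1-based HPACK index against the static table first and only then against the dynamic table, whose backing slice is stored oldest-first. A minimal standalone sketch of that index arithmetic; lookup, static, and dyn are names invented for this note.

package main

import "fmt"

// lookup resolves a 1-based HPACK index: 1..len(static) hits the static
// table; anything beyond that counts from the newest dynamic entry, even
// though dyn is stored oldest-first.
func lookup(i uint64, static, dyn []string) (string, bool) {
    if i == 0 {
        return "", false // index 0 is invalid in HPACK
    }
    if i <= uint64(len(static)) {
        return static[i-1], true
    }
    k := int(i) - len(static) // 1-based offset into the dynamic table
    if k > len(dyn) {
        return "", false
    }
    return dyn[len(dyn)-k], true // newest entry gets the lowest dynamic index
}

func main() {
    static := []string{":authority", ":method GET"}
    dyn := []string{"oldest", "newest"} // appended in arrival order
    fmt.Println(lookup(1, static, dyn)) // ":authority" true
    fmt.Println(lookup(3, static, dyn)) // "newest" true: first index past the static range
    fmt.Println(lookup(5, static, dyn)) // "" false: out of range
}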
vendor/golang.org/x/net/http2/hpack/tables.go (generated, vendored; 255 changed lines)
@@ -4,73 +4,200 @@
 
 package hpack
 
-func pair(name, value string) HeaderField {
-    return HeaderField{Name: name, Value: value}
+import (
+    "fmt"
+)
+
+// headerFieldTable implements a list of HeaderFields.
+// This is used to implement the static and dynamic tables.
+type headerFieldTable struct {
+    // For static tables, entries are never evicted.
+    //
+    // For dynamic tables, entries are evicted from ents[0] and added to the end.
+    // Each entry has a unique id that starts at one and increments for each
+    // entry that is added. This unique id is stable across evictions, meaning
+    // it can be used as a pointer to a specific entry. As in hpack, unique ids
+    // are 1-based. The unique id for ents[k] is k + evictCount + 1.
+    //
+    // Zero is not a valid unique id.
+    //
+    // evictCount should not overflow in any remotely practical situation. In
+    // practice, we will have one dynamic table per HTTP/2 connection. If we
+    // assume a very powerful server that handles 1M QPS per connection and each
+    // request adds (then evicts) 100 entries from the table, it would still take
+    // 2M years for evictCount to overflow.
+    ents       []HeaderField
+    evictCount uint64
+
+    // byName maps a HeaderField name to the unique id of the newest entry with
+    // the same name. See above for a definition of "unique id".
+    byName map[string]uint64
+
+    // byNameValue maps a HeaderField name/value pair to the unique id of the newest
+    // entry with the same name and value. See above for a definition of "unique id".
+    byNameValue map[pairNameValue]uint64
+}
+
+type pairNameValue struct {
+    name, value string
+}
+
+func (t *headerFieldTable) init() {
+    t.byName = make(map[string]uint64)
+    t.byNameValue = make(map[pairNameValue]uint64)
+}
+
+// len reports the number of entries in the table.
+func (t *headerFieldTable) len() int {
+    return len(t.ents)
+}
+
+// addEntry adds a new entry.
+func (t *headerFieldTable) addEntry(f HeaderField) {
+    id := uint64(t.len()) + t.evictCount + 1
+    t.byName[f.Name] = id
+    t.byNameValue[pairNameValue{f.Name, f.Value}] = id
+    t.ents = append(t.ents, f)
+}
+
+// evictOldest evicts the n oldest entries in the table.
+func (t *headerFieldTable) evictOldest(n int) {
+    if n > t.len() {
+        panic(fmt.Sprintf("evictOldest(%v) on table with %v entries", n, t.len()))
+    }
+    for k := 0; k < n; k++ {
+        f := t.ents[k]
+        id := t.evictCount + uint64(k) + 1
+        if t.byName[f.Name] == id {
+            delete(t.byName, f.Name)
+        }
+        if p := (pairNameValue{f.Name, f.Value}); t.byNameValue[p] == id {
+            delete(t.byNameValue, p)
+        }
+    }
+    copy(t.ents, t.ents[n:])
+    for k := t.len() - n; k < t.len(); k++ {
+        t.ents[k] = HeaderField{} // so strings can be garbage collected
+    }
+    t.ents = t.ents[:t.len()-n]
+    if t.evictCount+uint64(n) < t.evictCount {
+        panic("evictCount overflow")
+    }
+    t.evictCount += uint64(n)
+}
+
+// search finds f in the table. If there is no match, i is 0.
+// If both name and value match, i is the matched index and nameValueMatch
+// becomes true. If only name matches, i points to that index and
+// nameValueMatch becomes false.
+//
+// The returned index is a 1-based HPACK index. For dynamic tables, HPACK says
+// that index 1 should be the newest entry, but t.ents[0] is the oldest entry,
+// meaning t.ents is reversed for dynamic tables. Hence, when t is a dynamic
+// table, the return value i actually refers to the entry t.ents[t.len()-i].
+//
+// All tables are assumed to be a dynamic tables except for the global
+// staticTable pointer.
+//
+// See Section 2.3.3.
+func (t *headerFieldTable) search(f HeaderField) (i uint64, nameValueMatch bool) {
+    if !f.Sensitive {
+        if id := t.byNameValue[pairNameValue{f.Name, f.Value}]; id != 0 {
+            return t.idToIndex(id), true
+        }
+    }
+    if id := t.byName[f.Name]; id != 0 {
+        return t.idToIndex(id), false
+    }
+    return 0, false
+}
+
+// idToIndex converts a unique id to an HPACK index.
+// See Section 2.3.3.
+func (t *headerFieldTable) idToIndex(id uint64) uint64 {
+    if id <= t.evictCount {
+        panic(fmt.Sprintf("id (%v) <= evictCount (%v)", id, t.evictCount))
+    }
+    k := id - t.evictCount - 1 // convert id to an index t.ents[k]
+    if t != staticTable {
+        return uint64(t.len()) - k // dynamic table
+    }
+    return k + 1
 }
 
 // http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-07#appendix-B
-var staticTable = [...]HeaderField{
-    pair(":authority", ""), // index 1 (1-based)
-    pair(":method", "GET"),
-    pair(":method", "POST"),
-    pair(":path", "/"),
-    pair(":path", "/index.html"),
-    pair(":scheme", "http"),
-    pair(":scheme", "https"),
-    pair(":status", "200"),
-    pair(":status", "204"),
-    pair(":status", "206"),
-    pair(":status", "304"),
-    pair(":status", "400"),
-    pair(":status", "404"),
-    pair(":status", "500"),
-    pair("accept-charset", ""),
-    pair("accept-encoding", "gzip, deflate"),
-    pair("accept-language", ""),
-    pair("accept-ranges", ""),
-    pair("accept", ""),
-    pair("access-control-allow-origin", ""),
-    pair("age", ""),
-    pair("allow", ""),
-    pair("authorization", ""),
-    pair("cache-control", ""),
-    pair("content-disposition", ""),
-    pair("content-encoding", ""),
-    pair("content-language", ""),
-    pair("content-length", ""),
-    pair("content-location", ""),
-    pair("content-range", ""),
-    pair("content-type", ""),
-    pair("cookie", ""),
-    pair("date", ""),
-    pair("etag", ""),
-    pair("expect", ""),
-    pair("expires", ""),
-    pair("from", ""),
-    pair("host", ""),
-    pair("if-match", ""),
-    pair("if-modified-since", ""),
-    pair("if-none-match", ""),
-    pair("if-range", ""),
-    pair("if-unmodified-since", ""),
-    pair("last-modified", ""),
-    pair("link", ""),
-    pair("location", ""),
-    pair("max-forwards", ""),
-    pair("proxy-authenticate", ""),
-    pair("proxy-authorization", ""),
-    pair("range", ""),
-    pair("referer", ""),
-    pair("refresh", ""),
-    pair("retry-after", ""),
-    pair("server", ""),
-    pair("set-cookie", ""),
-    pair("strict-transport-security", ""),
-    pair("transfer-encoding", ""),
-    pair("user-agent", ""),
-    pair("vary", ""),
-    pair("via", ""),
-    pair("www-authenticate", ""),
+var staticTable = newStaticTable()
+var staticTableEntries = [...]HeaderField{
+    {Name: ":authority"},
+    {Name: ":method", Value: "GET"},
+    {Name: ":method", Value: "POST"},
+    {Name: ":path", Value: "/"},
+    {Name: ":path", Value: "/index.html"},
+    {Name: ":scheme", Value: "http"},
+    {Name: ":scheme", Value: "https"},
+    {Name: ":status", Value: "200"},
+    {Name: ":status", Value: "204"},
+    {Name: ":status", Value: "206"},
+    {Name: ":status", Value: "304"},
+    {Name: ":status", Value: "400"},
+    {Name: ":status", Value: "404"},
+    {Name: ":status", Value: "500"},
+    {Name: "accept-charset"},
+    {Name: "accept-encoding", Value: "gzip, deflate"},
+    {Name: "accept-language"},
+    {Name: "accept-ranges"},
+    {Name: "accept"},
+    {Name: "access-control-allow-origin"},
+    {Name: "age"},
+    {Name: "allow"},
+    {Name: "authorization"},
+    {Name: "cache-control"},
+    {Name: "content-disposition"},
+    {Name: "content-encoding"},
+    {Name: "content-language"},
+    {Name: "content-length"},
+    {Name: "content-location"},
+    {Name: "content-range"},
+    {Name: "content-type"},
+    {Name: "cookie"},
+    {Name: "date"},
+    {Name: "etag"},
+    {Name: "expect"},
+    {Name: "expires"},
+    {Name: "from"},
+    {Name: "host"},
+    {Name: "if-match"},
+    {Name: "if-modified-since"},
+    {Name: "if-none-match"},
+    {Name: "if-range"},
+    {Name: "if-unmodified-since"},
+    {Name: "last-modified"},
+    {Name: "link"},
+    {Name: "location"},
+    {Name: "max-forwards"},
+    {Name: "proxy-authenticate"},
+    {Name: "proxy-authorization"},
+    {Name: "range"},
+    {Name: "referer"},
+    {Name: "refresh"},
+    {Name: "retry-after"},
+    {Name: "server"},
+    {Name: "set-cookie"},
+    {Name: "strict-transport-security"},
+    {Name: "transfer-encoding"},
+    {Name: "user-agent"},
+    {Name: "vary"},
+    {Name: "via"},
+    {Name: "www-authenticate"},
+}
+
+func newStaticTable() *headerFieldTable {
+    t := &headerFieldTable{}
+    t.init()
+    for _, e := range staticTableEntries[:] {
+        t.addEntry(e)
+    }
+    return t
 }
 
 var huffmanCodes = [256]uint32{
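The headerFieldTable above keys its byName and byNameValue maps on a stable unique id (k + evictCount + 1) rather than on a slice position, so lookups keep working as older entries are evicted. A small self-contained sketch of that id-to-index arithmetic follows; miniTable and its methods are invented for this note, while the real type stores HeaderFields plus the two lookup maps.

package main

import "fmt"

type miniTable struct {
    ents       []string
    evictCount uint64
}

// add appends v and returns its stable unique id, using the same rule as
// headerFieldTable.addEntry: id = len(ents) + evictCount + 1 before append.
func (t *miniTable) add(v string) uint64 {
    id := uint64(len(t.ents)) + t.evictCount + 1
    t.ents = append(t.ents, v)
    return id
}

// evictOldest drops the n oldest entries and bumps evictCount, so ids handed
// out earlier keep pointing at the right entries.
func (t *miniTable) evictOldest(n int) {
    copy(t.ents, t.ents[n:])
    t.ents = t.ents[:len(t.ents)-n]
    t.evictCount += uint64(n)
}

// dynamicIndex converts a stable id to a 1-based HPACK index, where index 1
// is the newest entry (ents is stored oldest-first, so it is "reversed").
func (t *miniTable) dynamicIndex(id uint64) uint64 {
    k := id - t.evictCount - 1 // position in t.ents
    return uint64(len(t.ents)) - k
}

func main() {
    t := &miniTable{}
    a := t.add("a") // id 1
    b := t.add("b") // id 2
    fmt.Println(t.dynamicIndex(a), t.dynamicIndex(b)) // 2 1: the newest entry has index 1
    t.evictOldest(1)                                  // evict "a"
    fmt.Println(t.dynamicIndex(b))                    // still 1: the id survived the eviction
}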
vendor/golang.org/x/net/http2/http2.go (generated, vendored; 58 changed lines)
@@ -13,7 +13,8 @@
 // See https://http2.github.io/ for more information on HTTP/2.
 //
 // See https://http2.golang.org/ for a test server running this code.
-package http2
+//
+package http2 // import "golang.org/x/net/http2"
 
 import (
     "bufio"
@@ -35,6 +36,7 @@ var (
     VerboseLogs    bool
     logFrameWrites bool
     logFrameReads  bool
+    inTests        bool
 )
 
 func init() {
@@ -76,13 +78,23 @@ var (
 
 type streamState int
 
+// HTTP/2 stream states.
+//
+// See http://tools.ietf.org/html/rfc7540#section-5.1.
+//
+// For simplicity, the server code merges "reserved (local)" into
+// "half-closed (remote)". This is one less state transition to track.
+// The only downside is that we send PUSH_PROMISEs slightly less
+// liberally than allowable. More discussion here:
+// https://lists.w3.org/Archives/Public/ietf-http-wg/2016JulSep/0599.html
+//
+// "reserved (remote)" is omitted since the client code does not
+// support server push.
 const (
     stateIdle streamState = iota
     stateOpen
     stateHalfClosedLocal
     stateHalfClosedRemote
-    stateResvLocal
-    stateResvRemote
     stateClosed
 )
 
|
||||||
stateOpen: "Open",
|
stateOpen: "Open",
|
||||||
stateHalfClosedLocal: "HalfClosedLocal",
|
stateHalfClosedLocal: "HalfClosedLocal",
|
||||||
stateHalfClosedRemote: "HalfClosedRemote",
|
stateHalfClosedRemote: "HalfClosedRemote",
|
||||||
stateResvLocal: "ResvLocal",
|
|
||||||
stateResvRemote: "ResvRemote",
|
|
||||||
stateClosed: "Closed",
|
stateClosed: "Closed",
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -252,14 +262,27 @@ func newBufferedWriter(w io.Writer) *bufferedWriter {
     return &bufferedWriter{w: w}
 }
 
+// bufWriterPoolBufferSize is the size of bufio.Writer's
+// buffers created using bufWriterPool.
+//
+// TODO: pick a less arbitrary value? this is a bit under
+// (3 x typical 1500 byte MTU) at least. Other than that,
+// not much thought went into it.
+const bufWriterPoolBufferSize = 4 << 10
+
 var bufWriterPool = sync.Pool{
     New: func() interface{} {
-        // TODO: pick something better? this is a bit under
-        // (3 x typical 1500 byte MTU) at least.
-        return bufio.NewWriterSize(nil, 4<<10)
+        return bufio.NewWriterSize(nil, bufWriterPoolBufferSize)
     },
 }
 
+func (w *bufferedWriter) Available() int {
+    if w.bw == nil {
+        return bufWriterPoolBufferSize
+    }
+    return w.bw.Available()
+}
+
 func (w *bufferedWriter) Write(p []byte) (n int, err error) {
     if w.bw == nil {
         bw := bufWriterPool.Get().(*bufio.Writer)
@@ -342,10 +365,27 @@ func (s *sorter) Keys(h http.Header) []string {
 }
 
 func (s *sorter) SortStrings(ss []string) {
-    // Our sorter works on s.v, which sorter owners, so
+    // Our sorter works on s.v, which sorter owns, so
     // stash it away while we sort the user's buffer.
     save := s.v
     s.v = ss
     sort.Sort(s)
     s.v = save
 }
+
+// validPseudoPath reports whether v is a valid :path pseudo-header
+// value. It must be either:
+//
+//     *) a non-empty string starting with '/'
+//     *) the string '*', for OPTIONS requests.
+//
+// For now this is only used a quick check for deciding when to clean
+// up Opaque URLs before sending requests from the Transport.
+// See golang.org/issue/16847
+//
+// We used to enforce that the path also didn't start with "//", but
+// Google's GFE accepts such paths and Chrome sends them, so ignore
+// that part of the spec. See golang.org/issue/19103.
+func validPseudoPath(v string) bool {
+    return (len(v) > 0 && v[0] == '/') || v == "*"
+}
vendor/golang.org/x/net/http2/not_go16.go (generated, vendored; 25 changed lines)
@@ -7,7 +7,6 @@
 package http2
 
 import (
-    "crypto/tls"
     "net/http"
     "time"
 )
@@ -20,27 +19,3 @@ func transportExpectContinueTimeout(t1 *http.Transport) time.Duration {
     return 0
 
 }
-
-// isBadCipher reports whether the cipher is blacklisted by the HTTP/2 spec.
-func isBadCipher(cipher uint16) bool {
-    switch cipher {
-    case tls.TLS_RSA_WITH_RC4_128_SHA,
-        tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA,
-        tls.TLS_RSA_WITH_AES_128_CBC_SHA,
-        tls.TLS_RSA_WITH_AES_256_CBC_SHA,
-        tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA,
-        tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
-        tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
-        tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA,
-        tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,
-        tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
-        tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA:
-        // Reject cipher suites from Appendix A.
-        // "This list includes those cipher suites that do not
-        // offer an ephemeral key exchange and those that are
-        // based on the TLS null, stream or block cipher type"
-        return true
-    default:
-        return false
-    }
-}
vendor/golang.org/x/net/http2/not_go17.go (generated, vendored; 38 changed lines)
@@ -7,11 +7,16 @@
 package http2
 
 import (
+    "crypto/tls"
     "net"
     "net/http"
+    "time"
 )
 
-type contextContext interface{}
+type contextContext interface {
+    Done() <-chan struct{}
+    Err() error
+}
 
 type fakeContext struct{}
 
@@ -49,3 +54,34 @@ func contextWithCancel(ctx contextContext) (_ contextContext, cancel func()) {
 func requestWithContext(req *http.Request, ctx contextContext) *http.Request {
     return req
 }
+
+// temporary copy of Go 1.6's private tls.Config.clone:
+func cloneTLSConfig(c *tls.Config) *tls.Config {
+    return &tls.Config{
+        Rand:                     c.Rand,
+        Time:                     c.Time,
+        Certificates:             c.Certificates,
+        NameToCertificate:        c.NameToCertificate,
+        GetCertificate:           c.GetCertificate,
+        RootCAs:                  c.RootCAs,
+        NextProtos:               c.NextProtos,
+        ServerName:               c.ServerName,
+        ClientAuth:               c.ClientAuth,
+        ClientCAs:                c.ClientCAs,
+        InsecureSkipVerify:       c.InsecureSkipVerify,
+        CipherSuites:             c.CipherSuites,
+        PreferServerCipherSuites: c.PreferServerCipherSuites,
+        SessionTicketsDisabled:   c.SessionTicketsDisabled,
+        SessionTicketKey:         c.SessionTicketKey,
+        ClientSessionCache:       c.ClientSessionCache,
+        MinVersion:               c.MinVersion,
+        MaxVersion:               c.MaxVersion,
+        CurvePreferences:         c.CurvePreferences,
+    }
+}
+
+func (cc *ClientConn) Ping(ctx contextContext) error {
+    return cc.ping(ctx)
+}
+
+func (t *Transport) idleConnTimeout() time.Duration { return 0 }
vendor/golang.org/x/net/http2/not_go18.go (generated, vendored; new file, 29 lines)
@@ -0,0 +1,29 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !go1.8
+
+package http2
+
+import (
+    "io"
+    "net/http"
+)
+
+func configureServer18(h1 *http.Server, h2 *Server) error {
+    // No IdleTimeout to sync prior to Go 1.8.
+    return nil
+}
+
+func shouldLogPanic(panicValue interface{}) bool {
+    return panicValue != nil
+}
+
+func reqGetBody(req *http.Request) func() (io.ReadCloser, error) {
+    return nil
+}
+
+func reqBodyIsNoBody(io.ReadCloser) bool { return false }
+
+func go18httpNoBody() io.ReadCloser { return nil } // for tests only
vendor/golang.org/x/net/http2/not_go19.go (generated, vendored; new file, 16 lines)
@@ -0,0 +1,16 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !go1.9
+
+package http2
+
+import (
+    "net/http"
+)
+
+func configureServer19(s *http.Server, conf *Server) error {
+    // not supported prior to go1.9
+    return nil
+}
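go18.go and go19.go above, together with their not_go18.go and not_go19.go counterparts, follow the standard build-tag shim pattern: the same identifier gets a real implementation when the toolchain is new enough and a stub otherwise, so the rest of the package can call it unconditionally. A generic sketch of the pattern; the file names, package name, and constant below are invented for this note.

// shim_go19.go (hypothetical file)
// +build go1.9

package shim

// builtWithGo19 reports whether the package was compiled with Go 1.9 or later.
const builtWithGo19 = true

// shim_not_go19.go (hypothetical file, same package, older toolchains)
// +build !go1.9

package shim

const builtWithGo19 = false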
vendor/golang.org/x/net/http2/pipe.go (generated, vendored; 14 changed lines)
@@ -16,7 +16,7 @@ import (
 type pipe struct {
     mu       sync.Mutex
     c        sync.Cond     // c.L lazily initialized to &p.mu
-    b        pipeBuffer
+    b        pipeBuffer    // nil when done reading
     err      error         // read error once empty. non-nil means closed.
     breakErr error         // immediate read error (caller doesn't see rest of b)
     donec    chan struct{} // closed on error
@@ -32,6 +32,9 @@ type pipeBuffer interface {
 func (p *pipe) Len() int {
     p.mu.Lock()
     defer p.mu.Unlock()
+    if p.b == nil {
+        return 0
+    }
     return p.b.Len()
 }
 
@@ -47,7 +50,7 @@ func (p *pipe) Read(d []byte) (n int, err error) {
         if p.breakErr != nil {
             return 0, p.breakErr
         }
-        if p.b.Len() > 0 {
+        if p.b != nil && p.b.Len() > 0 {
             return p.b.Read(d)
         }
         if p.err != nil {
@@ -55,6 +58,7 @@ func (p *pipe) Read(d []byte) (n int, err error) {
                 p.readFn()     // e.g. copy trailers
                 p.readFn = nil // not sticky like p.err
             }
+            p.b = nil
             return 0, p.err
         }
         p.c.Wait()
@@ -75,6 +79,9 @@ func (p *pipe) Write(d []byte) (n int, err error) {
     if p.err != nil {
         return 0, errClosedPipeWrite
     }
+    if p.breakErr != nil {
+        return len(d), nil // discard when there is no reader
+    }
     return p.b.Write(d)
 }
 
@@ -109,6 +116,9 @@ func (p *pipe) closeWithError(dst *error, err error, fn func()) {
         return
     }
     p.readFn = fn
+    if dst == &p.breakErr {
+        p.b = nil
+    }
     *dst = err
     p.closeDoneLocked()
 }
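The pipe changes above make p.b nil-able so buffered data can be released: a break-style close (closeWithError into breakErr) drops the buffer immediately and Write starts discarding, while a normal close lets the reader drain the buffer before the error is surfaced. A stripped-down sketch of that distinction follows; miniPipe is invented for this note and omits the locking, blocking, and trailer handling of the real pipe.

package main

import (
    "bytes"
    "fmt"
)

type miniPipe struct {
    b        *bytes.Buffer // nil once the reader is done with it
    err      error         // surfaced only after b is drained
    breakErr error         // surfaced immediately; buffered data is dropped
}

func (p *miniPipe) read() (string, error) {
    if p.breakErr != nil {
        return "", p.breakErr // buffered data was discarded
    }
    if p.b != nil && p.b.Len() > 0 {
        return p.b.String(), nil
    }
    if p.err != nil {
        p.b = nil // free the buffer once the error is surfaced
        return "", p.err
    }
    return "", nil // the real pipe would block here
}

func main() {
    p := &miniPipe{b: bytes.NewBufferString("queued data")}
    p.err = fmt.Errorf("closed") // graceful close: the reader drains first
    out, _ := p.read()
    fmt.Println(out) // "queued data"
}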
Some files were not shown because too many files have changed in this diff.