web: Remove APIv2 (#7935)

* web: Remove APIv2

Signed-off-by: Julien Pivotto <roidelapluie@inuits.eu>

Parent: 7de6046d1e
Commit: e208afcc95

@@ -950,8 +950,6 @@ $ curl http://localhost:9090/api/v1/status/tsdb
## TSDB Admin APIs
These are APIs that expose database functionalities for the advanced user. These APIs are not enabled unless the `--web.enable-admin-api` is set.

We also expose a gRPC API whose definition can be found [here](https://github.com/prometheus/prometheus/blob/master/prompb/rpc.proto). This is experimental and might change in the future.

### Snapshot
Snapshot creates a snapshot of all current data into `snapshots/<datetime>-<rand>` under the TSDB's data directory and returns the directory as response.
It will optionally skip snapshotting data that is only present in the head block, and which has not yet been compacted to disk.
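For orientation (this is not part of the diff): the admin operations themselves remain available through the stable v1 HTTP API; only the experimental v2 HTTP/gRPC surface is being removed here. A minimal sketch of triggering a snapshot via the v1 endpoint, assuming a local Prometheus started with `--web.enable-admin-api`:

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"net/url"
)

func main() {
	// v1 admin API endpoint; requires --web.enable-admin-api on the server.
	endpoint := "http://localhost:9090/api/v1/admin/tsdb/snapshot"

	// skip_head plays the same role as TSDBSnapshotRequest.skip_head in the
	// v2 API that this commit removes.
	resp, err := http.PostForm(endpoint, url.Values{"skip_head": {"false"}})
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := io.ReadAll(resp.Body)
	// On success, the response data contains the snapshot directory name.
	fmt.Println(resp.Status, string(body))
}
```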
@@ -24,7 +24,6 @@ Things considered unstable for 2.x:
* Any feature listed as experimental or subject to change, including:
  * The [`holt_winters` PromQL function](https://github.com/prometheus/prometheus/issues/2458)
  * Remote read, remote write and the remote read endpoint
  * v2 HTTP and GRPC APIs
* Service discovery integrations, with the exception of `static_configs` and `file_sd_configs`
* Go APIs of packages that are part of the server
* HTML generated by the web UI
@@ -1,187 +0,0 @@
{
  "swagger": "2.0",
  "info": {
    "title": "rpc.proto",
    "version": "version not set"
  },
  "consumes": [
    "application/json"
  ],
  "produces": [
    "application/json"
  ],
  "paths": {
    "/v2/admin/tsdb/clean_tombstones": {
      "post": {
        "summary": "CleanTombstones removes the deleted data from disk and cleans up the existing tombstones.",
        "operationId": "Admin_TSDBCleanTombstones",
        "responses": {
          "200": {
            "description": "A successful response.",
            "schema": {
              "$ref": "#/definitions/prometheusTSDBCleanTombstonesResponse"
            }
          },
          "default": {
            "description": "An unexpected error response",
            "schema": {
              "$ref": "#/definitions/runtimeError"
            }
          }
        },
        "tags": [
          "Admin"
        ]
      }
    },
    "/v2/admin/tsdb/delete_series": {
      "post": {
        "summary": "DeleteSeries deletes data for a selection of series in a time range.",
        "operationId": "Admin_DeleteSeries",
        "responses": {
          "200": {
            "description": "A successful response.",
            "schema": {
              "$ref": "#/definitions/prometheusSeriesDeleteResponse"
            }
          },
          "default": {
            "description": "An unexpected error response",
            "schema": {
              "$ref": "#/definitions/runtimeError"
            }
          }
        },
        "parameters": [
          {
            "name": "body",
            "in": "body",
            "required": true,
            "schema": {
              "$ref": "#/definitions/prometheusSeriesDeleteRequest"
            }
          }
        ],
        "tags": [
          "Admin"
        ]
      }
    },
    "/v2/admin/tsdb/snapshot": {
      "post": {
        "summary": "Snapshot creates a snapshot of all current data into 'snapshots/\u003cdatetime\u003e-\u003crand\u003e' under the TSDB's data directory.",
        "operationId": "Admin_TSDBSnapshot",
        "responses": {
          "200": {
            "description": "A successful response.",
            "schema": {
              "$ref": "#/definitions/prometheusTSDBSnapshotResponse"
            }
          },
          "default": {
            "description": "An unexpected error response",
            "schema": {
              "$ref": "#/definitions/runtimeError"
            }
          }
        },
        "tags": [
          "Admin"
        ]
      }
    }
  },
  "definitions": {
    "prometheusLabelMatcher": {
      "type": "object",
      "properties": {
        "type": {
          "$ref": "#/definitions/prometheusLabelMatcherType"
        },
        "name": {
          "type": "string"
        },
        "value": {
          "type": "string"
        }
      },
      "description": "Matcher specifies a rule, which can match or set of labels or not."
    },
    "prometheusLabelMatcherType": {
      "type": "string",
      "enum": [
        "EQ",
        "NEQ",
        "RE",
        "NRE"
      ],
      "default": "EQ"
    },
    "prometheusSeriesDeleteRequest": {
      "type": "object",
      "properties": {
        "min_time": {
          "type": "string",
          "format": "date-time"
        },
        "max_time": {
          "type": "string",
          "format": "date-time"
        },
        "matchers": {
          "type": "array",
          "items": {
            "$ref": "#/definitions/prometheusLabelMatcher"
          }
        }
      }
    },
    "prometheusSeriesDeleteResponse": {
      "type": "object"
    },
    "prometheusTSDBCleanTombstonesResponse": {
      "type": "object"
    },
    "prometheusTSDBSnapshotResponse": {
      "type": "object",
      "properties": {
        "name": {
          "type": "string"
        }
      }
    },
    "protobufAny": {
      "type": "object",
      "properties": {
        "type_url": {
          "type": "string"
        },
        "value": {
          "type": "string",
          "format": "byte"
        }
      }
    },
    "runtimeError": {
      "type": "object",
      "properties": {
        "error": {
          "type": "string"
        },
        "code": {
          "type": "integer",
          "format": "int32"
        },
        "message": {
          "type": "string"
        },
        "details": {
          "type": "array",
          "items": {
            "$ref": "#/definitions/protobufAny"
          }
        }
      }
    }
  }
}
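As a rough illustration of what the removed schema described (the Go structs below are hypothetical, hand-written mirrors of the swagger definitions above, not Prometheus code), a delete_series request body could have been built like this:

```go
package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// labelMatcher mirrors prometheusLabelMatcher above.
type labelMatcher struct {
	Type  string `json:"type"` // one of EQ, NEQ, RE, NRE
	Name  string `json:"name"`
	Value string `json:"value"`
}

// seriesDeleteRequest mirrors prometheusSeriesDeleteRequest above;
// time.Time marshals to RFC 3339, matching the "date-time" format.
type seriesDeleteRequest struct {
	MinTime  time.Time      `json:"min_time"`
	MaxTime  time.Time      `json:"max_time"`
	Matchers []labelMatcher `json:"matchers"`
}

func main() {
	req := seriesDeleteRequest{
		MinTime:  time.Now().Add(-24 * time.Hour),
		MaxTime:  time.Now(),
		Matchers: []labelMatcher{{Type: "EQ", Name: "job", Value: "node"}},
	}
	b, _ := json.MarshalIndent(req, "", "  ")
	// This would have been the body for POST /v2/admin/tsdb/delete_series.
	fmt.Println(string(b))
}
```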
go.mod (7 changed lines)

@@ -58,7 +58,6 @@ require (
	github.com/samuel/go-zookeeper v0.0.0-20200724154423-2164a8ac840e
	github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749
	github.com/shurcooL/vfsgen v0.0.0-20200627165143-92b8a710ab6c
	github.com/soheilhy/cmux v0.1.4
	github.com/uber/jaeger-client-go v2.25.0+incompatible
	github.com/uber/jaeger-lib v2.2.0+incompatible
	go.mongodb.org/mongo-driver v1.3.2 // indirect

@@ -72,8 +71,7 @@ require (
	golang.org/x/tools v0.0.0-20200822203824-307de81be3f4
	google.golang.org/api v0.29.0
	google.golang.org/appengine v1.6.6 // indirect
	google.golang.org/genproto v0.0.0-20200815001618-f69a88009b70
	google.golang.org/grpc v1.29.1
	google.golang.org/genproto v0.0.0-20200815001618-f69a88009b70 // indirect
	gopkg.in/alecthomas/kingpin.v2 v2.2.6
	gopkg.in/fsnotify/fsnotify.v1 v1.4.7
	gopkg.in/yaml.v2 v2.3.0

@@ -92,9 +90,6 @@ exclude (
	// Exclude grpc v1.30.0 because of breaking changes. See #7621.
	github.com/grpc-ecosystem/grpc-gateway v1.14.7
	google.golang.org/api v0.30.0
	google.golang.org/grpc v1.30.0
	google.golang.org/grpc v1.31.0
	google.golang.org/grpc v1.32.0-dev

	// Exclude pre-go-mod kubernetes tags, as they are older
	// than v0.x releases but are picked when we update the dependencies.
prompb/rpc.pb.go (1366 changed lines)
File diff suppressed because it is too large
@@ -1,305 +0,0 @@
// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT.
// source: rpc.proto

/*
Package prompb is a reverse proxy.

It translates gRPC into RESTful JSON APIs.
*/
package prompb

import (
	"context"
	"io"
	"net/http"

	"github.com/gogo/protobuf/proto"
	"github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
	"github.com/grpc-ecosystem/grpc-gateway/runtime"
	"github.com/grpc-ecosystem/grpc-gateway/utilities"
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/grpclog"
	"google.golang.org/grpc/status"
)

// Suppress "imported and not used" errors
var _ codes.Code
var _ io.Reader
var _ status.Status
var _ = runtime.String
var _ = utilities.NewDoubleArray
var _ = descriptor.ForMessage

var (
	filter_Admin_TSDBSnapshot_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)}
)

func request_Admin_TSDBSnapshot_0(ctx context.Context, marshaler runtime.Marshaler, client AdminClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
	var protoReq TSDBSnapshotRequest
	var metadata runtime.ServerMetadata

	if err := req.ParseForm(); err != nil {
		return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
	}
	if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Admin_TSDBSnapshot_0); err != nil {
		return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
	}

	msg, err := client.TSDBSnapshot(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
	return msg, metadata, err

}

func local_request_Admin_TSDBSnapshot_0(ctx context.Context, marshaler runtime.Marshaler, server AdminServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
	var protoReq TSDBSnapshotRequest
	var metadata runtime.ServerMetadata

	if err := req.ParseForm(); err != nil {
		return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
	}
	if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Admin_TSDBSnapshot_0); err != nil {
		return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
	}

	msg, err := server.TSDBSnapshot(ctx, &protoReq)
	return msg, metadata, err

}

func request_Admin_TSDBCleanTombstones_0(ctx context.Context, marshaler runtime.Marshaler, client AdminClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
	var protoReq TSDBCleanTombstonesRequest
	var metadata runtime.ServerMetadata

	msg, err := client.TSDBCleanTombstones(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
	return msg, metadata, err

}

func local_request_Admin_TSDBCleanTombstones_0(ctx context.Context, marshaler runtime.Marshaler, server AdminServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
	var protoReq TSDBCleanTombstonesRequest
	var metadata runtime.ServerMetadata

	msg, err := server.TSDBCleanTombstones(ctx, &protoReq)
	return msg, metadata, err

}

func request_Admin_DeleteSeries_0(ctx context.Context, marshaler runtime.Marshaler, client AdminClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
	var protoReq SeriesDeleteRequest
	var metadata runtime.ServerMetadata

	newReader, berr := utilities.IOReaderFactory(req.Body)
	if berr != nil {
		return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
	}
	if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
		return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
	}

	msg, err := client.DeleteSeries(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
	return msg, metadata, err

}

func local_request_Admin_DeleteSeries_0(ctx context.Context, marshaler runtime.Marshaler, server AdminServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
	var protoReq SeriesDeleteRequest
	var metadata runtime.ServerMetadata

	newReader, berr := utilities.IOReaderFactory(req.Body)
	if berr != nil {
		return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
	}
	if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
		return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
	}

	msg, err := server.DeleteSeries(ctx, &protoReq)
	return msg, metadata, err

}

// RegisterAdminHandlerServer registers the http handlers for service Admin to "mux".
// UnaryRPC :call AdminServer directly.
// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906.
func RegisterAdminHandlerServer(ctx context.Context, mux *runtime.ServeMux, server AdminServer) error {

	mux.Handle("POST", pattern_Admin_TSDBSnapshot_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
		ctx, cancel := context.WithCancel(req.Context())
		defer cancel()
		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
		rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)
		if err != nil {
			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
			return
		}
		resp, md, err := local_request_Admin_TSDBSnapshot_0(rctx, inboundMarshaler, server, req, pathParams)
		ctx = runtime.NewServerMetadataContext(ctx, md)
		if err != nil {
			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
			return
		}

		forward_Admin_TSDBSnapshot_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)

	})

	mux.Handle("POST", pattern_Admin_TSDBCleanTombstones_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
		ctx, cancel := context.WithCancel(req.Context())
		defer cancel()
		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
		rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)
		if err != nil {
			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
			return
		}
		resp, md, err := local_request_Admin_TSDBCleanTombstones_0(rctx, inboundMarshaler, server, req, pathParams)
		ctx = runtime.NewServerMetadataContext(ctx, md)
		if err != nil {
			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
			return
		}

		forward_Admin_TSDBCleanTombstones_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)

	})

	mux.Handle("POST", pattern_Admin_DeleteSeries_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
		ctx, cancel := context.WithCancel(req.Context())
		defer cancel()
		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
		rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req)
		if err != nil {
			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
			return
		}
		resp, md, err := local_request_Admin_DeleteSeries_0(rctx, inboundMarshaler, server, req, pathParams)
		ctx = runtime.NewServerMetadataContext(ctx, md)
		if err != nil {
			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
			return
		}

		forward_Admin_DeleteSeries_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)

	})

	return nil
}

// RegisterAdminHandlerFromEndpoint is same as RegisterAdminHandler but
// automatically dials to "endpoint" and closes the connection when "ctx" gets done.
func RegisterAdminHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
	conn, err := grpc.Dial(endpoint, opts...)
	if err != nil {
		return err
	}
	defer func() {
		if err != nil {
			if cerr := conn.Close(); cerr != nil {
				grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
			}
			return
		}
		go func() {
			<-ctx.Done()
			if cerr := conn.Close(); cerr != nil {
				grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
			}
		}()
	}()

	return RegisterAdminHandler(ctx, mux, conn)
}

// RegisterAdminHandler registers the http handlers for service Admin to "mux".
// The handlers forward requests to the grpc endpoint over "conn".
func RegisterAdminHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {
	return RegisterAdminHandlerClient(ctx, mux, NewAdminClient(conn))
}

// RegisterAdminHandlerClient registers the http handlers for service Admin
// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "AdminClient".
// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "AdminClient"
// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
// "AdminClient" to call the correct interceptors.
func RegisterAdminHandlerClient(ctx context.Context, mux *runtime.ServeMux, client AdminClient) error {

	mux.Handle("POST", pattern_Admin_TSDBSnapshot_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
		ctx, cancel := context.WithCancel(req.Context())
		defer cancel()
		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
		rctx, err := runtime.AnnotateContext(ctx, mux, req)
		if err != nil {
			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
			return
		}
		resp, md, err := request_Admin_TSDBSnapshot_0(rctx, inboundMarshaler, client, req, pathParams)
		ctx = runtime.NewServerMetadataContext(ctx, md)
		if err != nil {
			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
			return
		}

		forward_Admin_TSDBSnapshot_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)

	})

	mux.Handle("POST", pattern_Admin_TSDBCleanTombstones_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
		ctx, cancel := context.WithCancel(req.Context())
		defer cancel()
		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
		rctx, err := runtime.AnnotateContext(ctx, mux, req)
		if err != nil {
			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
			return
		}
		resp, md, err := request_Admin_TSDBCleanTombstones_0(rctx, inboundMarshaler, client, req, pathParams)
		ctx = runtime.NewServerMetadataContext(ctx, md)
		if err != nil {
			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
			return
		}

		forward_Admin_TSDBCleanTombstones_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)

	})

	mux.Handle("POST", pattern_Admin_DeleteSeries_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
		ctx, cancel := context.WithCancel(req.Context())
		defer cancel()
		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
		rctx, err := runtime.AnnotateContext(ctx, mux, req)
		if err != nil {
			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
			return
		}
		resp, md, err := request_Admin_DeleteSeries_0(rctx, inboundMarshaler, client, req, pathParams)
		ctx = runtime.NewServerMetadataContext(ctx, md)
		if err != nil {
			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
			return
		}

		forward_Admin_DeleteSeries_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)

	})

	return nil
}

var (
	pattern_Admin_TSDBSnapshot_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v2", "admin", "tsdb", "snapshot"}, "", runtime.AssumeColonVerbOpt(true)))

	pattern_Admin_TSDBCleanTombstones_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v2", "admin", "tsdb", "clean_tombstones"}, "", runtime.AssumeColonVerbOpt(true)))

	pattern_Admin_DeleteSeries_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v2", "admin", "tsdb", "delete_series"}, "", runtime.AssumeColonVerbOpt(true)))
)

var (
	forward_Admin_TSDBSnapshot_0 = runtime.ForwardResponseMessage

	forward_Admin_TSDBCleanTombstones_0 = runtime.ForwardResponseMessage

	forward_Admin_DeleteSeries_0 = runtime.ForwardResponseMessage
)
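For context only (this is not code from the repository): generated gateway files like the one deleted above are normally mounted onto a plain net/http server. A minimal sketch, assuming the pre-removal `prompb` package and placeholder listen addresses:

```go
package main

import (
	"context"
	"log"
	"net/http"

	"github.com/grpc-ecosystem/grpc-gateway/runtime"
	"github.com/prometheus/prometheus/prompb"
	"google.golang.org/grpc"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// The ServeMux routes /v2/admin/tsdb/* to the generated handlers, which
	// proxy each HTTP call to the gRPC Admin service behind "endpoint".
	mux := runtime.NewServeMux()
	endpoint := "localhost:9091" // placeholder address of the gRPC listener
	opts := []grpc.DialOption{grpc.WithInsecure()}
	if err := prompb.RegisterAdminHandlerFromEndpoint(ctx, mux, endpoint, opts); err != nil {
		log.Fatal(err)
	}
	log.Fatal(http.ListenAndServe(":8080", mux))
}
```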
@@ -1,76 +0,0 @@
// Copyright 2017 Prometheus Team
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

syntax = "proto3";
package prometheus;

import "types.proto";
import "google/protobuf/timestamp.proto";
import "google/api/annotations.proto";
import "gogoproto/gogo.proto";

option go_package = "prompb";

option (gogoproto.sizer_all) = true;
option (gogoproto.marshaler_all) = true;
option (gogoproto.unmarshaler_all) = true;
option (gogoproto.goproto_getters_all) = false;


service Admin {
  // Snapshot creates a snapshot of all current data into 'snapshots/<datetime>-<rand>' under the TSDB's data directory.
  rpc TSDBSnapshot(TSDBSnapshotRequest) returns (TSDBSnapshotResponse) {
    option (google.api.http) = {
      post: "/v2/admin/tsdb/snapshot"
    };
  }

  // CleanTombstones removes the deleted data from disk and cleans up the existing tombstones.
  rpc TSDBCleanTombstones(TSDBCleanTombstonesRequest) returns (TSDBCleanTombstonesResponse) {
    option (google.api.http) = {
      post: "/v2/admin/tsdb/clean_tombstones"
    };
  }

  // DeleteSeries deletes data for a selection of series in a time range.
  rpc DeleteSeries(SeriesDeleteRequest) returns (SeriesDeleteResponse) {
    option (google.api.http) = {
      post: "/v2/admin/tsdb/delete_series"
      body: "*"
    };
  }
}


message TSDBSnapshotRequest {
  bool skip_head = 1;
}

message TSDBSnapshotResponse {
  string name = 1;
}

message TSDBCleanTombstonesRequest {
}

message TSDBCleanTombstonesResponse {
}

message SeriesDeleteRequest {
  google.protobuf.Timestamp min_time = 1 [(gogoproto.stdtime) = true];
  google.protobuf.Timestamp max_time = 2 [(gogoproto.stdtime) = true];
  repeated LabelMatcher matchers = 3 [(gogoproto.nullable) = false];
}

message SeriesDeleteResponse {
}
@@ -40,15 +40,6 @@ for dir in ${DIRS}; do
		-I="${GRPC_GATEWAY_ROOT}/third_party/googleapis" \
		./*.proto

	protoc -I. \
		-I="${GOGOPROTO_PATH}" \
		-I="${PROM_PATH}" \
		-I="${GRPC_GATEWAY_ROOT}/third_party/googleapis" \
		--grpc-gateway_out=logtostderr=true:. \
		--swagger_out=logtostderr=true:../documentation/dev/api/ \
		rpc.proto
	mv ../documentation/dev/api/rpc.swagger.json ../documentation/dev/api/swagger.json

	sed -i.bak -E 's/import _ \"github.com\/gogo\/protobuf\/gogoproto\"//g' -- *.pb.go
	sed -i.bak -E 's/import _ \"google\/protobuf\"//g' -- *.pb.go
	sed -i.bak -E 's/\t_ \"google\/protobuf\"//g' -- *.pb.go
vendor/github.com/gogo/protobuf/types/any.go (140 changed lines, generated, vendored)

@@ -1,140 +0,0 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2016 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

package types

// This file implements functions to marshal proto.Message to/from
// google.protobuf.Any message.

import (
	"fmt"
	"reflect"
	"strings"

	"github.com/gogo/protobuf/proto"
)

const googleApis = "type.googleapis.com/"

// AnyMessageName returns the name of the message contained in a google.protobuf.Any message.
//
// Note that regular type assertions should be done using the Is
// function. AnyMessageName is provided for less common use cases like filtering a
// sequence of Any messages based on a set of allowed message type names.
func AnyMessageName(any *Any) (string, error) {
	if any == nil {
		return "", fmt.Errorf("message is nil")
	}
	slash := strings.LastIndex(any.TypeUrl, "/")
	if slash < 0 {
		return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl)
	}
	return any.TypeUrl[slash+1:], nil
}

// MarshalAny takes the protocol buffer and encodes it into google.protobuf.Any.
func MarshalAny(pb proto.Message) (*Any, error) {
	value, err := proto.Marshal(pb)
	if err != nil {
		return nil, err
	}
	return &Any{TypeUrl: googleApis + proto.MessageName(pb), Value: value}, nil
}

// DynamicAny is a value that can be passed to UnmarshalAny to automatically
// allocate a proto.Message for the type specified in a google.protobuf.Any
// message. The allocated message is stored in the embedded proto.Message.
//
// Example:
//
// var x ptypes.DynamicAny
// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... }
// fmt.Printf("unmarshaled message: %v", x.Message)
type DynamicAny struct {
	proto.Message
}

// Empty returns a new proto.Message of the type specified in a
// google.protobuf.Any message. It returns an error if corresponding message
// type isn't linked in.
func EmptyAny(any *Any) (proto.Message, error) {
	aname, err := AnyMessageName(any)
	if err != nil {
		return nil, err
	}

	t := proto.MessageType(aname)
	if t == nil {
		return nil, fmt.Errorf("any: message type %q isn't linked in", aname)
	}
	return reflect.New(t.Elem()).Interface().(proto.Message), nil
}

// UnmarshalAny parses the protocol buffer representation in a google.protobuf.Any
// message and places the decoded result in pb. It returns an error if type of
// contents of Any message does not match type of pb message.
//
// pb can be a proto.Message, or a *DynamicAny.
func UnmarshalAny(any *Any, pb proto.Message) error {
	if d, ok := pb.(*DynamicAny); ok {
		if d.Message == nil {
			var err error
			d.Message, err = EmptyAny(any)
			if err != nil {
				return err
			}
		}
		return UnmarshalAny(any, d.Message)
	}

	aname, err := AnyMessageName(any)
	if err != nil {
		return err
	}

	mname := proto.MessageName(pb)
	if aname != mname {
		return fmt.Errorf("mismatched message type: got %q want %q", aname, mname)
	}
	return proto.Unmarshal(any.Value, pb)
}

// Is returns true if any value contains a given message type.
func Is(any *Any, pb proto.Message) bool {
	// The following is equivalent to AnyMessageName(any) == proto.MessageName(pb),
	// but it avoids scanning TypeUrl for the slash.
	if any == nil {
		return false
	}
	name := proto.MessageName(pb)
	prefix := len(any.TypeUrl) - len(name)
	return prefix >= 1 && any.TypeUrl[prefix-1] == '/' && any.TypeUrl[prefix:] == name
}
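For reference, a small usage sketch of the helpers defined in the file above, assuming the pre-removal vendored `github.com/gogo/protobuf/types` package:

```go
package main

import (
	"fmt"
	"time"

	"github.com/gogo/protobuf/types"
)

func main() {
	// Pack a concrete message into a google.protobuf.Any.
	dur := types.DurationProto(90 * time.Second)
	packed, err := types.MarshalAny(dur)
	if err != nil {
		panic(err)
	}
	fmt.Println(packed.TypeUrl) // type.googleapis.com/google.protobuf.Duration

	// Unpack it without knowing the concrete type up front, via DynamicAny.
	var dyn types.DynamicAny
	if err := types.UnmarshalAny(packed, &dyn); err != nil {
		panic(err)
	}
	fmt.Printf("unmarshaled message: %v\n", dyn.Message)
}
```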
vendor/github.com/gogo/protobuf/types/any.pb.go (697 changed lines, generated, vendored)
@ -1,697 +0,0 @@
|
|||
// Code generated by protoc-gen-gogo. DO NOT EDIT.
|
||||
// source: google/protobuf/any.proto
|
||||
|
||||
package types
|
||||
|
||||
import (
|
||||
bytes "bytes"
|
||||
fmt "fmt"
|
||||
proto "github.com/gogo/protobuf/proto"
|
||||
io "io"
|
||||
math "math"
|
||||
math_bits "math/bits"
|
||||
reflect "reflect"
|
||||
strings "strings"
|
||||
)
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
|
||||
|
||||
// `Any` contains an arbitrary serialized protocol buffer message along with a
|
||||
// URL that describes the type of the serialized message.
|
||||
//
|
||||
// Protobuf library provides support to pack/unpack Any values in the form
|
||||
// of utility functions or additional generated methods of the Any type.
|
||||
//
|
||||
// Example 1: Pack and unpack a message in C++.
|
||||
//
|
||||
// Foo foo = ...;
|
||||
// Any any;
|
||||
// any.PackFrom(foo);
|
||||
// ...
|
||||
// if (any.UnpackTo(&foo)) {
|
||||
// ...
|
||||
// }
|
||||
//
|
||||
// Example 2: Pack and unpack a message in Java.
|
||||
//
|
||||
// Foo foo = ...;
|
||||
// Any any = Any.pack(foo);
|
||||
// ...
|
||||
// if (any.is(Foo.class)) {
|
||||
// foo = any.unpack(Foo.class);
|
||||
// }
|
||||
//
|
||||
// Example 3: Pack and unpack a message in Python.
|
||||
//
|
||||
// foo = Foo(...)
|
||||
// any = Any()
|
||||
// any.Pack(foo)
|
||||
// ...
|
||||
// if any.Is(Foo.DESCRIPTOR):
|
||||
// any.Unpack(foo)
|
||||
// ...
|
||||
//
|
||||
// Example 4: Pack and unpack a message in Go
|
||||
//
|
||||
// foo := &pb.Foo{...}
|
||||
// any, err := ptypes.MarshalAny(foo)
|
||||
// ...
|
||||
// foo := &pb.Foo{}
|
||||
// if err := ptypes.UnmarshalAny(any, foo); err != nil {
|
||||
// ...
|
||||
// }
|
||||
//
|
||||
// The pack methods provided by protobuf library will by default use
|
||||
// 'type.googleapis.com/full.type.name' as the type URL and the unpack
|
||||
// methods only use the fully qualified type name after the last '/'
|
||||
// in the type URL, for example "foo.bar.com/x/y.z" will yield type
|
||||
// name "y.z".
|
||||
//
|
||||
//
|
||||
// JSON
|
||||
// ====
|
||||
// The JSON representation of an `Any` value uses the regular
|
||||
// representation of the deserialized, embedded message, with an
|
||||
// additional field `@type` which contains the type URL. Example:
|
||||
//
|
||||
// package google.profile;
|
||||
// message Person {
|
||||
// string first_name = 1;
|
||||
// string last_name = 2;
|
||||
// }
|
||||
//
|
||||
// {
|
||||
// "@type": "type.googleapis.com/google.profile.Person",
|
||||
// "firstName": <string>,
|
||||
// "lastName": <string>
|
||||
// }
|
||||
//
|
||||
// If the embedded message type is well-known and has a custom JSON
|
||||
// representation, that representation will be embedded adding a field
|
||||
// `value` which holds the custom JSON in addition to the `@type`
|
||||
// field. Example (for message [google.protobuf.Duration][]):
|
||||
//
|
||||
// {
|
||||
// "@type": "type.googleapis.com/google.protobuf.Duration",
|
||||
// "value": "1.212s"
|
||||
// }
|
||||
//
|
||||
type Any struct {
|
||||
// A URL/resource name that uniquely identifies the type of the serialized
|
||||
// protocol buffer message. This string must contain at least
|
||||
// one "/" character. The last segment of the URL's path must represent
|
||||
// the fully qualified name of the type (as in
|
||||
// `path/google.protobuf.Duration`). The name should be in a canonical form
|
||||
// (e.g., leading "." is not accepted).
|
||||
//
|
||||
// In practice, teams usually precompile into the binary all types that they
|
||||
// expect it to use in the context of Any. However, for URLs which use the
|
||||
// scheme `http`, `https`, or no scheme, one can optionally set up a type
|
||||
// server that maps type URLs to message definitions as follows:
|
||||
//
|
||||
// * If no scheme is provided, `https` is assumed.
|
||||
// * An HTTP GET on the URL must yield a [google.protobuf.Type][]
|
||||
// value in binary format, or produce an error.
|
||||
// * Applications are allowed to cache lookup results based on the
|
||||
// URL, or have them precompiled into a binary to avoid any
|
||||
// lookup. Therefore, binary compatibility needs to be preserved
|
||||
// on changes to types. (Use versioned type names to manage
|
||||
// breaking changes.)
|
||||
//
|
||||
// Note: this functionality is not currently available in the official
|
||||
// protobuf release, and it is not used for type URLs beginning with
|
||||
// type.googleapis.com.
|
||||
//
|
||||
// Schemes other than `http`, `https` (or the empty scheme) might be
|
||||
// used with implementation specific semantics.
|
||||
//
|
||||
TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"`
|
||||
// Must be a valid serialized protocol buffer of the above specified type.
|
||||
Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *Any) Reset() { *m = Any{} }
|
||||
func (*Any) ProtoMessage() {}
|
||||
func (*Any) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_b53526c13ae22eb4, []int{0}
|
||||
}
|
||||
func (*Any) XXX_WellKnownType() string { return "Any" }
|
||||
func (m *Any) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
}
|
||||
func (m *Any) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
if deterministic {
|
||||
return xxx_messageInfo_Any.Marshal(b, m, deterministic)
|
||||
} else {
|
||||
b = b[:cap(b)]
|
||||
n, err := m.MarshalToSizedBuffer(b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return b[:n], nil
|
||||
}
|
||||
}
|
||||
func (m *Any) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_Any.Merge(m, src)
|
||||
}
|
||||
func (m *Any) XXX_Size() int {
|
||||
return m.Size()
|
||||
}
|
||||
func (m *Any) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_Any.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_Any proto.InternalMessageInfo
|
||||
|
||||
func (m *Any) GetTypeUrl() string {
|
||||
if m != nil {
|
||||
return m.TypeUrl
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *Any) GetValue() []byte {
|
||||
if m != nil {
|
||||
return m.Value
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (*Any) XXX_MessageName() string {
|
||||
return "google.protobuf.Any"
|
||||
}
|
||||
func init() {
|
||||
proto.RegisterType((*Any)(nil), "google.protobuf.Any")
|
||||
}
|
||||
|
||||
func init() { proto.RegisterFile("google/protobuf/any.proto", fileDescriptor_b53526c13ae22eb4) }
|
||||
|
||||
var fileDescriptor_b53526c13ae22eb4 = []byte{
|
||||
// 211 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4c, 0xcf, 0xcf, 0x4f,
|
||||
0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcc, 0xab, 0xd4,
|
||||
0x03, 0x73, 0x84, 0xf8, 0x21, 0x52, 0x7a, 0x30, 0x29, 0x25, 0x33, 0x2e, 0x66, 0xc7, 0xbc, 0x4a,
|
||||
0x21, 0x49, 0x2e, 0x8e, 0x92, 0xca, 0x82, 0xd4, 0xf8, 0xd2, 0xa2, 0x1c, 0x09, 0x46, 0x05, 0x46,
|
||||
0x0d, 0xce, 0x20, 0x76, 0x10, 0x3f, 0xb4, 0x28, 0x47, 0x48, 0x84, 0x8b, 0xb5, 0x2c, 0x31, 0xa7,
|
||||
0x34, 0x55, 0x82, 0x49, 0x81, 0x51, 0x83, 0x27, 0x08, 0xc2, 0x71, 0xaa, 0xbf, 0xf1, 0x50, 0x8e,
|
||||
0xe1, 0xc3, 0x43, 0x39, 0xc6, 0x1f, 0x0f, 0xe5, 0x18, 0x1b, 0x1e, 0xc9, 0x31, 0xae, 0x78, 0x24,
|
||||
0xc7, 0x78, 0xe2, 0x91, 0x1c, 0xe3, 0x85, 0x47, 0x72, 0x8c, 0x0f, 0x1e, 0xc9, 0x31, 0xbe, 0x78,
|
||||
0x24, 0xc7, 0xf0, 0x01, 0x24, 0xfe, 0x58, 0x8e, 0xf1, 0xc4, 0x63, 0x39, 0x46, 0x2e, 0xe1, 0xe4,
|
||||
0xfc, 0x5c, 0x3d, 0x34, 0xeb, 0x9d, 0x38, 0x1c, 0xf3, 0x2a, 0x03, 0x40, 0x9c, 0x00, 0xc6, 0x28,
|
||||
0x56, 0x90, 0x8d, 0xc5, 0x8b, 0x98, 0x98, 0xdd, 0x03, 0x9c, 0x56, 0x31, 0xc9, 0xb9, 0x43, 0x94,
|
||||
0x06, 0x40, 0x95, 0xea, 0x85, 0xa7, 0xe6, 0xe4, 0x78, 0xe7, 0xe5, 0x97, 0xe7, 0x85, 0x80, 0x94,
|
||||
0x25, 0xb1, 0x81, 0xcd, 0x30, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0xb7, 0x81, 0x82, 0xd3, 0xed,
|
||||
0x00, 0x00, 0x00,
|
||||
}
|
||||
|
||||
func (this *Any) Compare(that interface{}) int {
|
||||
if that == nil {
|
||||
if this == nil {
|
||||
return 0
|
||||
}
|
||||
return 1
|
||||
}
|
||||
|
||||
that1, ok := that.(*Any)
|
||||
if !ok {
|
||||
that2, ok := that.(Any)
|
||||
if ok {
|
||||
that1 = &that2
|
||||
} else {
|
||||
return 1
|
||||
}
|
||||
}
|
||||
if that1 == nil {
|
||||
if this == nil {
|
||||
return 0
|
||||
}
|
||||
return 1
|
||||
} else if this == nil {
|
||||
return -1
|
||||
}
|
||||
if this.TypeUrl != that1.TypeUrl {
|
||||
if this.TypeUrl < that1.TypeUrl {
|
||||
return -1
|
||||
}
|
||||
return 1
|
||||
}
|
||||
if c := bytes.Compare(this.Value, that1.Value); c != 0 {
|
||||
return c
|
||||
}
|
||||
if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 {
|
||||
return c
|
||||
}
|
||||
return 0
|
||||
}
|
||||
func (this *Any) Equal(that interface{}) bool {
|
||||
if that == nil {
|
||||
return this == nil
|
||||
}
|
||||
|
||||
that1, ok := that.(*Any)
|
||||
if !ok {
|
||||
that2, ok := that.(Any)
|
||||
if ok {
|
||||
that1 = &that2
|
||||
} else {
|
||||
return false
|
||||
}
|
||||
}
|
||||
if that1 == nil {
|
||||
return this == nil
|
||||
} else if this == nil {
|
||||
return false
|
||||
}
|
||||
if this.TypeUrl != that1.TypeUrl {
|
||||
return false
|
||||
}
|
||||
if !bytes.Equal(this.Value, that1.Value) {
|
||||
return false
|
||||
}
|
||||
if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
func (this *Any) GoString() string {
|
||||
if this == nil {
|
||||
return "nil"
|
||||
}
|
||||
s := make([]string, 0, 6)
|
||||
s = append(s, "&types.Any{")
|
||||
s = append(s, "TypeUrl: "+fmt.Sprintf("%#v", this.TypeUrl)+",\n")
|
||||
s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n")
|
||||
if this.XXX_unrecognized != nil {
|
||||
s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
|
||||
}
|
||||
s = append(s, "}")
|
||||
return strings.Join(s, "")
|
||||
}
|
||||
func valueToGoStringAny(v interface{}, typ string) string {
|
||||
rv := reflect.ValueOf(v)
|
||||
if rv.IsNil() {
|
||||
return "nil"
|
||||
}
|
||||
pv := reflect.Indirect(rv).Interface()
|
||||
return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
|
||||
}
|
||||
func (m *Any) Marshal() (dAtA []byte, err error) {
|
||||
size := m.Size()
|
||||
dAtA = make([]byte, size)
|
||||
n, err := m.MarshalToSizedBuffer(dAtA[:size])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return dAtA[:n], nil
|
||||
}
|
||||
|
||||
func (m *Any) MarshalTo(dAtA []byte) (int, error) {
|
||||
size := m.Size()
|
||||
return m.MarshalToSizedBuffer(dAtA[:size])
|
||||
}
|
||||
|
||||
func (m *Any) MarshalToSizedBuffer(dAtA []byte) (int, error) {
|
||||
i := len(dAtA)
|
||||
_ = i
|
||||
var l int
|
||||
_ = l
|
||||
if m.XXX_unrecognized != nil {
|
||||
i -= len(m.XXX_unrecognized)
|
||||
copy(dAtA[i:], m.XXX_unrecognized)
|
||||
}
|
||||
if len(m.Value) > 0 {
|
||||
i -= len(m.Value)
|
||||
copy(dAtA[i:], m.Value)
|
||||
i = encodeVarintAny(dAtA, i, uint64(len(m.Value)))
|
||||
i--
|
||||
dAtA[i] = 0x12
|
||||
}
|
||||
if len(m.TypeUrl) > 0 {
|
||||
i -= len(m.TypeUrl)
|
||||
copy(dAtA[i:], m.TypeUrl)
|
||||
i = encodeVarintAny(dAtA, i, uint64(len(m.TypeUrl)))
|
||||
i--
|
||||
dAtA[i] = 0xa
|
||||
}
|
||||
return len(dAtA) - i, nil
|
||||
}
|
||||
|
||||
func encodeVarintAny(dAtA []byte, offset int, v uint64) int {
|
||||
offset -= sovAny(v)
|
||||
base := offset
|
||||
for v >= 1<<7 {
|
||||
dAtA[offset] = uint8(v&0x7f | 0x80)
|
||||
v >>= 7
|
||||
offset++
|
||||
}
|
||||
dAtA[offset] = uint8(v)
|
||||
return base
|
||||
}
|
||||
func NewPopulatedAny(r randyAny, easy bool) *Any {
|
||||
this := &Any{}
|
||||
this.TypeUrl = string(randStringAny(r))
|
||||
v1 := r.Intn(100)
|
||||
this.Value = make([]byte, v1)
|
||||
for i := 0; i < v1; i++ {
|
||||
this.Value[i] = byte(r.Intn(256))
|
||||
}
|
||||
if !easy && r.Intn(10) != 0 {
|
||||
this.XXX_unrecognized = randUnrecognizedAny(r, 3)
|
||||
}
|
||||
return this
|
||||
}
|
||||
|
||||
type randyAny interface {
|
||||
Float32() float32
|
||||
Float64() float64
|
||||
Int63() int64
|
||||
Int31() int32
|
||||
Uint32() uint32
|
||||
Intn(n int) int
|
||||
}
|
||||
|
||||
func randUTF8RuneAny(r randyAny) rune {
|
||||
ru := r.Intn(62)
|
||||
if ru < 10 {
|
||||
return rune(ru + 48)
|
||||
} else if ru < 36 {
|
||||
return rune(ru + 55)
|
||||
}
|
||||
return rune(ru + 61)
|
||||
}
|
||||
func randStringAny(r randyAny) string {
|
||||
v2 := r.Intn(100)
|
||||
tmps := make([]rune, v2)
|
||||
for i := 0; i < v2; i++ {
|
||||
tmps[i] = randUTF8RuneAny(r)
|
||||
}
|
||||
return string(tmps)
|
||||
}
|
||||
func randUnrecognizedAny(r randyAny, maxFieldNumber int) (dAtA []byte) {
|
||||
l := r.Intn(5)
|
||||
for i := 0; i < l; i++ {
|
||||
wire := r.Intn(4)
|
||||
if wire == 3 {
|
||||
wire = 5
|
||||
}
|
||||
fieldNumber := maxFieldNumber + r.Intn(100)
|
||||
dAtA = randFieldAny(dAtA, r, fieldNumber, wire)
|
||||
}
|
||||
return dAtA
|
||||
}
|
||||
func randFieldAny(dAtA []byte, r randyAny, fieldNumber int, wire int) []byte {
|
||||
key := uint32(fieldNumber)<<3 | uint32(wire)
|
||||
switch wire {
|
||||
case 0:
|
||||
dAtA = encodeVarintPopulateAny(dAtA, uint64(key))
|
||||
v3 := r.Int63()
|
||||
if r.Intn(2) == 0 {
|
||||
v3 *= -1
|
||||
}
|
||||
dAtA = encodeVarintPopulateAny(dAtA, uint64(v3))
|
||||
case 1:
|
||||
dAtA = encodeVarintPopulateAny(dAtA, uint64(key))
|
||||
dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)))
|
||||
case 2:
|
||||
dAtA = encodeVarintPopulateAny(dAtA, uint64(key))
|
||||
ll := r.Intn(100)
|
||||
dAtA = encodeVarintPopulateAny(dAtA, uint64(ll))
|
||||
for j := 0; j < ll; j++ {
|
||||
dAtA = append(dAtA, byte(r.Intn(256)))
|
||||
}
|
||||
default:
|
||||
dAtA = encodeVarintPopulateAny(dAtA, uint64(key))
|
||||
dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)))
|
||||
}
|
||||
return dAtA
|
||||
}
|
||||
func encodeVarintPopulateAny(dAtA []byte, v uint64) []byte {
|
||||
for v >= 1<<7 {
|
||||
dAtA = append(dAtA, uint8(uint64(v)&0x7f|0x80))
|
||||
v >>= 7
|
||||
}
|
||||
dAtA = append(dAtA, uint8(v))
|
||||
return dAtA
|
||||
}
|
||||
func (m *Any) Size() (n int) {
|
||||
if m == nil {
|
||||
return 0
|
||||
}
|
||||
var l int
|
||||
_ = l
|
||||
l = len(m.TypeUrl)
|
||||
if l > 0 {
|
||||
n += 1 + l + sovAny(uint64(l))
|
||||
}
|
||||
l = len(m.Value)
|
||||
if l > 0 {
|
||||
n += 1 + l + sovAny(uint64(l))
|
||||
}
|
||||
if m.XXX_unrecognized != nil {
|
||||
n += len(m.XXX_unrecognized)
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func sovAny(x uint64) (n int) {
|
||||
return (math_bits.Len64(x|1) + 6) / 7
|
||||
}
|
||||
func sozAny(x uint64) (n int) {
|
||||
return sovAny(uint64((x << 1) ^ uint64((int64(x) >> 63))))
|
||||
}
|
||||
func (this *Any) String() string {
|
||||
if this == nil {
|
||||
return "nil"
|
||||
}
|
||||
s := strings.Join([]string{`&Any{`,
|
||||
`TypeUrl:` + fmt.Sprintf("%v", this.TypeUrl) + `,`,
|
||||
`Value:` + fmt.Sprintf("%v", this.Value) + `,`,
|
||||
`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
|
||||
`}`,
|
||||
}, "")
|
||||
return s
|
||||
}
|
||||
func valueToStringAny(v interface{}) string {
|
||||
rv := reflect.ValueOf(v)
|
||||
if rv.IsNil() {
|
||||
return "nil"
|
||||
}
|
||||
pv := reflect.Indirect(rv).Interface()
|
||||
return fmt.Sprintf("*%v", pv)
|
||||
}
|
||||
func (m *Any) Unmarshal(dAtA []byte) error {
|
||||
l := len(dAtA)
|
||||
iNdEx := 0
|
||||
for iNdEx < l {
|
||||
preIndex := iNdEx
|
||||
var wire uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowAny
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
fieldNum := int32(wire >> 3)
|
||||
wireType := int(wire & 0x7)
|
||||
if wireType == 4 {
|
||||
return fmt.Errorf("proto: Any: wiretype end group for non-group")
|
||||
}
|
||||
if fieldNum <= 0 {
|
||||
return fmt.Errorf("proto: Any: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||
}
|
||||
switch fieldNum {
|
||||
case 1:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field TypeUrl", wireType)
|
||||
}
|
||||
var stringLen uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowAny
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
intStringLen := int(stringLen)
|
||||
if intStringLen < 0 {
|
||||
return ErrInvalidLengthAny
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthAny
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.TypeUrl = string(dAtA[iNdEx:postIndex])
|
||||
iNdEx = postIndex
|
||||
case 2:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
|
||||
}
|
||||
var byteLen int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowAny
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
byteLen |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if byteLen < 0 {
|
||||
return ErrInvalidLengthAny
|
||||
}
|
||||
postIndex := iNdEx + byteLen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthAny
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...)
|
||||
if m.Value == nil {
|
||||
m.Value = []byte{}
|
||||
}
|
||||
iNdEx = postIndex
|
||||
default:
|
||||
iNdEx = preIndex
|
||||
skippy, err := skipAny(dAtA[iNdEx:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if skippy < 0 {
|
||||
return ErrInvalidLengthAny
|
||||
}
|
||||
if (iNdEx + skippy) < 0 {
|
||||
return ErrInvalidLengthAny
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
|
||||
iNdEx += skippy
|
||||
}
|
||||
}
|
||||
|
||||
if iNdEx > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func skipAny(dAtA []byte) (n int, err error) {
|
||||
l := len(dAtA)
|
||||
iNdEx := 0
|
||||
depth := 0
|
||||
for iNdEx < l {
|
||||
var wire uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return 0, ErrIntOverflowAny
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
wireType := int(wire & 0x7)
|
||||
switch wireType {
|
||||
case 0:
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return 0, ErrIntOverflowAny
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
iNdEx++
|
||||
if dAtA[iNdEx-1] < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
case 1:
|
||||
iNdEx += 8
|
||||
case 2:
|
||||
var length int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return 0, ErrIntOverflowAny
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
length |= (int(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if length < 0 {
|
||||
return 0, ErrInvalidLengthAny
|
||||
}
|
||||
iNdEx += length
|
||||
case 3:
|
||||
depth++
|
||||
case 4:
|
||||
if depth == 0 {
|
||||
return 0, ErrUnexpectedEndOfGroupAny
|
||||
}
|
||||
depth--
|
||||
case 5:
|
||||
iNdEx += 4
|
||||
default:
|
||||
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
|
||||
}
|
||||
if iNdEx < 0 {
|
||||
return 0, ErrInvalidLengthAny
|
||||
}
|
||||
if depth == 0 {
|
||||
return iNdEx, nil
|
||||
}
|
||||
}
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
|
||||
var (
|
||||
ErrInvalidLengthAny = fmt.Errorf("proto: negative length found during unmarshaling")
|
||||
ErrIntOverflowAny = fmt.Errorf("proto: integer overflow")
|
||||
ErrUnexpectedEndOfGroupAny = fmt.Errorf("proto: unexpected end of group")
|
||||
)
|
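As an aside on the generated helpers above: `sovAny` computes how many bytes a value needs when encoded as a protobuf varint (7 payload bits per byte). A tiny standalone illustration of that size calculation:

```go
package main

import (
	"fmt"
	"math/bits"
)

// sovAny, as in the generated code above: the number of bytes needed to
// encode x as a protobuf varint.
func sovAny(x uint64) int {
	return (bits.Len64(x|1) + 6) / 7
}

func main() {
	for _, x := range []uint64{0, 127, 128, 300, 1 << 35} {
		fmt.Printf("varint(%d) takes %d byte(s)\n", x, sovAny(x))
	}
}
```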
vendor/github.com/gogo/protobuf/types/api.pb.go (2143 changed lines, generated, vendored)
File diff suppressed because it is too large
vendor/github.com/gogo/protobuf/types/doc.go (35 changed lines, generated, vendored)

@@ -1,35 +0,0 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2016 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

/*
Package types contains code for interacting with well-known types.
*/
package types
100
vendor/github.com/gogo/protobuf/types/duration.go
generated
vendored
100
vendor/github.com/gogo/protobuf/types/duration.go
generated
vendored
|
@ -1,100 +0,0 @@
|
|||
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2016 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

package types

// This file implements conversions between google.protobuf.Duration
// and time.Duration.

import (
    "errors"
    "fmt"
    "time"
)

const (
    // Range of a Duration in seconds, as specified in
    // google/protobuf/duration.proto. This is about 10,000 years in seconds.
    maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60)
    minSeconds = -maxSeconds
)

// validateDuration determines whether the Duration is valid according to the
// definition in google/protobuf/duration.proto. A valid Duration
// may still be too large to fit into a time.Duration (the range of Duration
// is about 10,000 years, and the range of time.Duration is about 290).
func validateDuration(d *Duration) error {
    if d == nil {
        return errors.New("duration: nil Duration")
    }
    if d.Seconds < minSeconds || d.Seconds > maxSeconds {
        return fmt.Errorf("duration: %#v: seconds out of range", d)
    }
    if d.Nanos <= -1e9 || d.Nanos >= 1e9 {
        return fmt.Errorf("duration: %#v: nanos out of range", d)
    }
    // Seconds and Nanos must have the same sign, unless d.Nanos is zero.
    if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) {
        return fmt.Errorf("duration: %#v: seconds and nanos have different signs", d)
    }
    return nil
}

// DurationFromProto converts a Duration to a time.Duration. DurationFromProto
// returns an error if the Duration is invalid or is too large to be
// represented in a time.Duration.
func DurationFromProto(p *Duration) (time.Duration, error) {
    if err := validateDuration(p); err != nil {
        return 0, err
    }
    d := time.Duration(p.Seconds) * time.Second
    if int64(d/time.Second) != p.Seconds {
        return 0, fmt.Errorf("duration: %#v is out of range for time.Duration", p)
    }
    if p.Nanos != 0 {
        d += time.Duration(p.Nanos) * time.Nanosecond
        if (d < 0) != (p.Nanos < 0) {
            return 0, fmt.Errorf("duration: %#v is out of range for time.Duration", p)
        }
    }
    return d, nil
}

// DurationProto converts a time.Duration to a Duration.
func DurationProto(d time.Duration) *Duration {
    nanos := d.Nanoseconds()
    secs := nanos / 1e9
    nanos -= secs * 1e9
    return &Duration{
        Seconds: secs,
        Nanos:   int32(nanos),
    }
}
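For orientation while reviewing the removal, here is a minimal, illustrative sketch (not part of this diff) of how the two exported helpers above, DurationProto and DurationFromProto, are typically used to round-trip between time.Duration and the protobuf Duration message:

package main

import (
    "fmt"
    "time"

    "github.com/gogo/protobuf/types"
)

func main() {
    // time.Duration -> google.protobuf.Duration message.
    pb := types.DurationProto(90 * time.Second)

    // google.protobuf.Duration -> time.Duration; validates range and sign.
    d, err := types.DurationFromProto(pb)
    if err != nil {
        panic(err)
    }
    fmt.Println(d) // 1m30s
}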
vendor/github.com/gogo/protobuf/types/duration.pb.go (generated, vendored, 520 lines removed)
@@ -1,520 +0,0 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
|
||||
// source: google/protobuf/duration.proto
|
||||
|
||||
package types
|
||||
|
||||
import (
|
||||
bytes "bytes"
|
||||
fmt "fmt"
|
||||
proto "github.com/gogo/protobuf/proto"
|
||||
io "io"
|
||||
math "math"
|
||||
math_bits "math/bits"
|
||||
reflect "reflect"
|
||||
strings "strings"
|
||||
)
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
|
||||
|
||||
// A Duration represents a signed, fixed-length span of time represented
|
||||
// as a count of seconds and fractions of seconds at nanosecond
|
||||
// resolution. It is independent of any calendar and concepts like "day"
|
||||
// or "month". It is related to Timestamp in that the difference between
|
||||
// two Timestamp values is a Duration and it can be added or subtracted
|
||||
// from a Timestamp. Range is approximately +-10,000 years.
|
||||
//
|
||||
// # Examples
|
||||
//
|
||||
// Example 1: Compute Duration from two Timestamps in pseudo code.
|
||||
//
|
||||
// Timestamp start = ...;
|
||||
// Timestamp end = ...;
|
||||
// Duration duration = ...;
|
||||
//
|
||||
// duration.seconds = end.seconds - start.seconds;
|
||||
// duration.nanos = end.nanos - start.nanos;
|
||||
//
|
||||
// if (duration.seconds < 0 && duration.nanos > 0) {
|
||||
// duration.seconds += 1;
|
||||
// duration.nanos -= 1000000000;
|
||||
// } else if (durations.seconds > 0 && duration.nanos < 0) {
|
||||
// duration.seconds -= 1;
|
||||
// duration.nanos += 1000000000;
|
||||
// }
|
||||
//
|
||||
// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code.
|
||||
//
|
||||
// Timestamp start = ...;
|
||||
// Duration duration = ...;
|
||||
// Timestamp end = ...;
|
||||
//
|
||||
// end.seconds = start.seconds + duration.seconds;
|
||||
// end.nanos = start.nanos + duration.nanos;
|
||||
//
|
||||
// if (end.nanos < 0) {
|
||||
// end.seconds -= 1;
|
||||
// end.nanos += 1000000000;
|
||||
// } else if (end.nanos >= 1000000000) {
|
||||
// end.seconds += 1;
|
||||
// end.nanos -= 1000000000;
|
||||
// }
|
||||
//
|
||||
// Example 3: Compute Duration from datetime.timedelta in Python.
|
||||
//
|
||||
// td = datetime.timedelta(days=3, minutes=10)
|
||||
// duration = Duration()
|
||||
// duration.FromTimedelta(td)
|
||||
//
|
||||
// # JSON Mapping
|
||||
//
|
||||
// In JSON format, the Duration type is encoded as a string rather than an
|
||||
// object, where the string ends in the suffix "s" (indicating seconds) and
|
||||
// is preceded by the number of seconds, with nanoseconds expressed as
|
||||
// fractional seconds. For example, 3 seconds with 0 nanoseconds should be
|
||||
// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should
|
||||
// be expressed in JSON format as "3.000000001s", and 3 seconds and 1
|
||||
// microsecond should be expressed in JSON format as "3.000001s".
|
||||
//
|
||||
//
|
||||
type Duration struct {
|
||||
// Signed seconds of the span of time. Must be from -315,576,000,000
|
||||
// to +315,576,000,000 inclusive. Note: these bounds are computed from:
|
||||
// 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years
|
||||
Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"`
|
||||
// Signed fractions of a second at nanosecond resolution of the span
|
||||
// of time. Durations less than one second are represented with a 0
|
||||
// `seconds` field and a positive or negative `nanos` field. For durations
|
||||
// of one second or more, a non-zero value for the `nanos` field must be
|
||||
// of the same sign as the `seconds` field. Must be from -999,999,999
|
||||
// to +999,999,999 inclusive.
|
||||
Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *Duration) Reset() { *m = Duration{} }
|
||||
func (*Duration) ProtoMessage() {}
|
||||
func (*Duration) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_23597b2ebd7ac6c5, []int{0}
|
||||
}
|
||||
func (*Duration) XXX_WellKnownType() string { return "Duration" }
|
||||
func (m *Duration) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
}
|
||||
func (m *Duration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
if deterministic {
|
||||
return xxx_messageInfo_Duration.Marshal(b, m, deterministic)
|
||||
} else {
|
||||
b = b[:cap(b)]
|
||||
n, err := m.MarshalToSizedBuffer(b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return b[:n], nil
|
||||
}
|
||||
}
|
||||
func (m *Duration) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_Duration.Merge(m, src)
|
||||
}
|
||||
func (m *Duration) XXX_Size() int {
|
||||
return m.Size()
|
||||
}
|
||||
func (m *Duration) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_Duration.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_Duration proto.InternalMessageInfo
|
||||
|
||||
func (m *Duration) GetSeconds() int64 {
|
||||
if m != nil {
|
||||
return m.Seconds
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *Duration) GetNanos() int32 {
|
||||
if m != nil {
|
||||
return m.Nanos
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (*Duration) XXX_MessageName() string {
|
||||
return "google.protobuf.Duration"
|
||||
}
|
||||
func init() {
|
||||
proto.RegisterType((*Duration)(nil), "google.protobuf.Duration")
|
||||
}
|
||||
|
||||
func init() { proto.RegisterFile("google/protobuf/duration.proto", fileDescriptor_23597b2ebd7ac6c5) }
|
||||
|
||||
var fileDescriptor_23597b2ebd7ac6c5 = []byte{
|
||||
// 209 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f,
|
||||
0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0x29, 0x2d, 0x4a,
|
||||
0x2c, 0xc9, 0xcc, 0xcf, 0xd3, 0x03, 0x8b, 0x08, 0xf1, 0x43, 0xe4, 0xf5, 0x60, 0xf2, 0x4a, 0x56,
|
||||
0x5c, 0x1c, 0x2e, 0x50, 0x25, 0x42, 0x12, 0x5c, 0xec, 0xc5, 0xa9, 0xc9, 0xf9, 0x79, 0x29, 0xc5,
|
||||
0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0xcc, 0x41, 0x30, 0xae, 0x90, 0x08, 0x17, 0x6b, 0x5e, 0x62, 0x5e,
|
||||
0x7e, 0xb1, 0x04, 0x93, 0x02, 0xa3, 0x06, 0x6b, 0x10, 0x84, 0xe3, 0x54, 0x7f, 0xe3, 0xa1, 0x1c,
|
||||
0xc3, 0x87, 0x87, 0x72, 0x8c, 0x2b, 0x1e, 0xc9, 0x31, 0x9e, 0x78, 0x24, 0xc7, 0x78, 0xe1, 0x91,
|
||||
0x1c, 0xe3, 0x83, 0x47, 0x72, 0x8c, 0x2f, 0x1e, 0xc9, 0x31, 0x7c, 0x78, 0x24, 0xc7, 0xb8, 0xe2,
|
||||
0xb1, 0x1c, 0xe3, 0x89, 0xc7, 0x72, 0x8c, 0x5c, 0xc2, 0xc9, 0xf9, 0xb9, 0x7a, 0x68, 0x56, 0x3b,
|
||||
0xf1, 0xc2, 0x2c, 0x0e, 0x00, 0x89, 0x04, 0x30, 0x46, 0xb1, 0x96, 0x54, 0x16, 0xa4, 0x16, 0xff,
|
||||
0x60, 0x64, 0x5c, 0xc4, 0xc4, 0xec, 0x1e, 0xe0, 0xb4, 0x8a, 0x49, 0xce, 0x1d, 0xa2, 0x25, 0x00,
|
||||
0xaa, 0x45, 0x2f, 0x3c, 0x35, 0x27, 0xc7, 0x3b, 0x2f, 0xbf, 0x3c, 0x2f, 0x04, 0xa4, 0x32, 0x89,
|
||||
0x0d, 0x6c, 0x96, 0x31, 0x20, 0x00, 0x00, 0xff, 0xff, 0x8a, 0x1c, 0x64, 0x4e, 0xf6, 0x00, 0x00,
|
||||
0x00,
|
||||
}
|
||||
|
||||
func (this *Duration) Compare(that interface{}) int {
|
||||
if that == nil {
|
||||
if this == nil {
|
||||
return 0
|
||||
}
|
||||
return 1
|
||||
}
|
||||
|
||||
that1, ok := that.(*Duration)
|
||||
if !ok {
|
||||
that2, ok := that.(Duration)
|
||||
if ok {
|
||||
that1 = &that2
|
||||
} else {
|
||||
return 1
|
||||
}
|
||||
}
|
||||
if that1 == nil {
|
||||
if this == nil {
|
||||
return 0
|
||||
}
|
||||
return 1
|
||||
} else if this == nil {
|
||||
return -1
|
||||
}
|
||||
if this.Seconds != that1.Seconds {
|
||||
if this.Seconds < that1.Seconds {
|
||||
return -1
|
||||
}
|
||||
return 1
|
||||
}
|
||||
if this.Nanos != that1.Nanos {
|
||||
if this.Nanos < that1.Nanos {
|
||||
return -1
|
||||
}
|
||||
return 1
|
||||
}
|
||||
if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 {
|
||||
return c
|
||||
}
|
||||
return 0
|
||||
}
|
||||
func (this *Duration) Equal(that interface{}) bool {
|
||||
if that == nil {
|
||||
return this == nil
|
||||
}
|
||||
|
||||
that1, ok := that.(*Duration)
|
||||
if !ok {
|
||||
that2, ok := that.(Duration)
|
||||
if ok {
|
||||
that1 = &that2
|
||||
} else {
|
||||
return false
|
||||
}
|
||||
}
|
||||
if that1 == nil {
|
||||
return this == nil
|
||||
} else if this == nil {
|
||||
return false
|
||||
}
|
||||
if this.Seconds != that1.Seconds {
|
||||
return false
|
||||
}
|
||||
if this.Nanos != that1.Nanos {
|
||||
return false
|
||||
}
|
||||
if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
func (this *Duration) GoString() string {
|
||||
if this == nil {
|
||||
return "nil"
|
||||
}
|
||||
s := make([]string, 0, 6)
|
||||
s = append(s, "&types.Duration{")
|
||||
s = append(s, "Seconds: "+fmt.Sprintf("%#v", this.Seconds)+",\n")
|
||||
s = append(s, "Nanos: "+fmt.Sprintf("%#v", this.Nanos)+",\n")
|
||||
if this.XXX_unrecognized != nil {
|
||||
s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
|
||||
}
|
||||
s = append(s, "}")
|
||||
return strings.Join(s, "")
|
||||
}
|
||||
func valueToGoStringDuration(v interface{}, typ string) string {
|
||||
rv := reflect.ValueOf(v)
|
||||
if rv.IsNil() {
|
||||
return "nil"
|
||||
}
|
||||
pv := reflect.Indirect(rv).Interface()
|
||||
return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
|
||||
}
|
||||
func (m *Duration) Marshal() (dAtA []byte, err error) {
|
||||
size := m.Size()
|
||||
dAtA = make([]byte, size)
|
||||
n, err := m.MarshalToSizedBuffer(dAtA[:size])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return dAtA[:n], nil
|
||||
}
|
||||
|
||||
func (m *Duration) MarshalTo(dAtA []byte) (int, error) {
|
||||
size := m.Size()
|
||||
return m.MarshalToSizedBuffer(dAtA[:size])
|
||||
}
|
||||
|
||||
func (m *Duration) MarshalToSizedBuffer(dAtA []byte) (int, error) {
|
||||
i := len(dAtA)
|
||||
_ = i
|
||||
var l int
|
||||
_ = l
|
||||
if m.XXX_unrecognized != nil {
|
||||
i -= len(m.XXX_unrecognized)
|
||||
copy(dAtA[i:], m.XXX_unrecognized)
|
||||
}
|
||||
if m.Nanos != 0 {
|
||||
i = encodeVarintDuration(dAtA, i, uint64(m.Nanos))
|
||||
i--
|
||||
dAtA[i] = 0x10
|
||||
}
|
||||
if m.Seconds != 0 {
|
||||
i = encodeVarintDuration(dAtA, i, uint64(m.Seconds))
|
||||
i--
|
||||
dAtA[i] = 0x8
|
||||
}
|
||||
return len(dAtA) - i, nil
|
||||
}
|
||||
|
||||
func encodeVarintDuration(dAtA []byte, offset int, v uint64) int {
|
||||
offset -= sovDuration(v)
|
||||
base := offset
|
||||
for v >= 1<<7 {
|
||||
dAtA[offset] = uint8(v&0x7f | 0x80)
|
||||
v >>= 7
|
||||
offset++
|
||||
}
|
||||
dAtA[offset] = uint8(v)
|
||||
return base
|
||||
}
|
||||
func (m *Duration) Size() (n int) {
|
||||
if m == nil {
|
||||
return 0
|
||||
}
|
||||
var l int
|
||||
_ = l
|
||||
if m.Seconds != 0 {
|
||||
n += 1 + sovDuration(uint64(m.Seconds))
|
||||
}
|
||||
if m.Nanos != 0 {
|
||||
n += 1 + sovDuration(uint64(m.Nanos))
|
||||
}
|
||||
if m.XXX_unrecognized != nil {
|
||||
n += len(m.XXX_unrecognized)
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func sovDuration(x uint64) (n int) {
|
||||
return (math_bits.Len64(x|1) + 6) / 7
|
||||
}
|
||||
func sozDuration(x uint64) (n int) {
|
||||
return sovDuration(uint64((x << 1) ^ uint64((int64(x) >> 63))))
|
||||
}
|
||||
func (m *Duration) Unmarshal(dAtA []byte) error {
|
||||
l := len(dAtA)
|
||||
iNdEx := 0
|
||||
for iNdEx < l {
|
||||
preIndex := iNdEx
|
||||
var wire uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowDuration
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
fieldNum := int32(wire >> 3)
|
||||
wireType := int(wire & 0x7)
|
||||
if wireType == 4 {
|
||||
return fmt.Errorf("proto: Duration: wiretype end group for non-group")
|
||||
}
|
||||
if fieldNum <= 0 {
|
||||
return fmt.Errorf("proto: Duration: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||
}
|
||||
switch fieldNum {
|
||||
case 1:
|
||||
if wireType != 0 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Seconds", wireType)
|
||||
}
|
||||
m.Seconds = 0
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowDuration
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
m.Seconds |= int64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
case 2:
|
||||
if wireType != 0 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Nanos", wireType)
|
||||
}
|
||||
m.Nanos = 0
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowDuration
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
m.Nanos |= int32(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
default:
|
||||
iNdEx = preIndex
|
||||
skippy, err := skipDuration(dAtA[iNdEx:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if skippy < 0 {
|
||||
return ErrInvalidLengthDuration
|
||||
}
|
||||
if (iNdEx + skippy) < 0 {
|
||||
return ErrInvalidLengthDuration
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
|
||||
iNdEx += skippy
|
||||
}
|
||||
}
|
||||
|
||||
if iNdEx > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func skipDuration(dAtA []byte) (n int, err error) {
|
||||
l := len(dAtA)
|
||||
iNdEx := 0
|
||||
depth := 0
|
||||
for iNdEx < l {
|
||||
var wire uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return 0, ErrIntOverflowDuration
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
wireType := int(wire & 0x7)
|
||||
switch wireType {
|
||||
case 0:
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return 0, ErrIntOverflowDuration
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
iNdEx++
|
||||
if dAtA[iNdEx-1] < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
case 1:
|
||||
iNdEx += 8
|
||||
case 2:
|
||||
var length int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return 0, ErrIntOverflowDuration
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
length |= (int(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if length < 0 {
|
||||
return 0, ErrInvalidLengthDuration
|
||||
}
|
||||
iNdEx += length
|
||||
case 3:
|
||||
depth++
|
||||
case 4:
|
||||
if depth == 0 {
|
||||
return 0, ErrUnexpectedEndOfGroupDuration
|
||||
}
|
||||
depth--
|
||||
case 5:
|
||||
iNdEx += 4
|
||||
default:
|
||||
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
|
||||
}
|
||||
if iNdEx < 0 {
|
||||
return 0, ErrInvalidLengthDuration
|
||||
}
|
||||
if depth == 0 {
|
||||
return iNdEx, nil
|
||||
}
|
||||
}
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
|
||||
var (
|
||||
ErrInvalidLengthDuration = fmt.Errorf("proto: negative length found during unmarshaling")
|
||||
ErrIntOverflowDuration = fmt.Errorf("proto: integer overflow")
|
||||
ErrUnexpectedEndOfGroupDuration = fmt.Errorf("proto: unexpected end of group")
|
||||
)
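For the generated code above, a small illustrative round trip through the wire-format methods (Marshal, Unmarshal, GetSeconds, GetNanos); the values are arbitrary and this program is not part of the diff:

package main

import (
    "fmt"

    "github.com/gogo/protobuf/types"
)

func main() {
    in := &types.Duration{Seconds: 3, Nanos: 1} // "3.000000001s" in the JSON mapping
    b, err := in.Marshal()                      // protobuf wire encoding
    if err != nil {
        panic(err)
    }
    var out types.Duration
    if err := out.Unmarshal(b); err != nil {
        panic(err)
    }
    fmt.Println(out.GetSeconds(), out.GetNanos()) // 3 1
}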
vendor/github.com/gogo/protobuf/types/duration_gogo.go (generated, vendored, 100 lines removed)
@@ -1,100 +0,0 @@
// Protocol Buffers for Go with Gadgets
|
||||
//
|
||||
// Copyright (c) 2016, The GoGo Authors. All rights reserved.
|
||||
// http://github.com/gogo/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package types
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
)
|
||||
|
||||
func NewPopulatedDuration(r interface {
|
||||
Int63() int64
|
||||
}, easy bool) *Duration {
|
||||
this := &Duration{}
|
||||
maxSecs := time.Hour.Nanoseconds() / 1e9
|
||||
max := 2 * maxSecs
|
||||
s := int64(r.Int63()) % max
|
||||
s -= maxSecs
|
||||
neg := int64(1)
|
||||
if s < 0 {
|
||||
neg = -1
|
||||
}
|
||||
this.Seconds = s
|
||||
this.Nanos = int32(neg * (r.Int63() % 1e9))
|
||||
return this
|
||||
}
|
||||
|
||||
func (d *Duration) String() string {
|
||||
td, err := DurationFromProto(d)
|
||||
if err != nil {
|
||||
return fmt.Sprintf("(%v)", err)
|
||||
}
|
||||
return td.String()
|
||||
}
|
||||
|
||||
func NewPopulatedStdDuration(r interface {
|
||||
Int63() int64
|
||||
}, easy bool) *time.Duration {
|
||||
dur := NewPopulatedDuration(r, easy)
|
||||
d, err := DurationFromProto(dur)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
return &d
|
||||
}
|
||||
|
||||
func SizeOfStdDuration(d time.Duration) int {
|
||||
dur := DurationProto(d)
|
||||
return dur.Size()
|
||||
}
|
||||
|
||||
func StdDurationMarshal(d time.Duration) ([]byte, error) {
|
||||
size := SizeOfStdDuration(d)
|
||||
buf := make([]byte, size)
|
||||
_, err := StdDurationMarshalTo(d, buf)
|
||||
return buf, err
|
||||
}
|
||||
|
||||
func StdDurationMarshalTo(d time.Duration, data []byte) (int, error) {
|
||||
dur := DurationProto(d)
|
||||
return dur.MarshalTo(data)
|
||||
}
|
||||
|
||||
func StdDurationUnmarshal(d *time.Duration, data []byte) error {
|
||||
dur := &Duration{}
|
||||
if err := dur.Unmarshal(data); err != nil {
|
||||
return err
|
||||
}
|
||||
dd, err := DurationFromProto(dur)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*d = dd
|
||||
return nil
|
||||
}
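A minimal illustrative sketch (not part of this diff) of the StdDuration helpers removed above, which marshal a plain time.Duration directly to and from the protobuf wire format:

package main

import (
    "fmt"
    "time"

    "github.com/gogo/protobuf/types"
)

func main() {
    b, err := types.StdDurationMarshal(1500 * time.Millisecond)
    if err != nil {
        panic(err)
    }
    var d time.Duration
    if err := types.StdDurationUnmarshal(&d, b); err != nil {
        panic(err)
    }
    fmt.Println(d) // 1.5s
}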
vendor/github.com/gogo/protobuf/types/empty.pb.go (generated, vendored, 465 lines removed)
@@ -1,465 +0,0 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
|
||||
// source: google/protobuf/empty.proto
|
||||
|
||||
package types
|
||||
|
||||
import (
|
||||
bytes "bytes"
|
||||
fmt "fmt"
|
||||
proto "github.com/gogo/protobuf/proto"
|
||||
io "io"
|
||||
math "math"
|
||||
math_bits "math/bits"
|
||||
reflect "reflect"
|
||||
strings "strings"
|
||||
)
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
|
||||
|
||||
// A generic empty message that you can re-use to avoid defining duplicated
|
||||
// empty messages in your APIs. A typical example is to use it as the request
|
||||
// or the response type of an API method. For instance:
|
||||
//
|
||||
// service Foo {
|
||||
// rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty);
|
||||
// }
|
||||
//
|
||||
// The JSON representation for `Empty` is empty JSON object `{}`.
|
||||
type Empty struct {
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *Empty) Reset() { *m = Empty{} }
|
||||
func (*Empty) ProtoMessage() {}
|
||||
func (*Empty) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_900544acb223d5b8, []int{0}
|
||||
}
|
||||
func (*Empty) XXX_WellKnownType() string { return "Empty" }
|
||||
func (m *Empty) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
}
|
||||
func (m *Empty) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
if deterministic {
|
||||
return xxx_messageInfo_Empty.Marshal(b, m, deterministic)
|
||||
} else {
|
||||
b = b[:cap(b)]
|
||||
n, err := m.MarshalToSizedBuffer(b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return b[:n], nil
|
||||
}
|
||||
}
|
||||
func (m *Empty) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_Empty.Merge(m, src)
|
||||
}
|
||||
func (m *Empty) XXX_Size() int {
|
||||
return m.Size()
|
||||
}
|
||||
func (m *Empty) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_Empty.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_Empty proto.InternalMessageInfo
|
||||
|
||||
func (*Empty) XXX_MessageName() string {
|
||||
return "google.protobuf.Empty"
|
||||
}
|
||||
func init() {
|
||||
proto.RegisterType((*Empty)(nil), "google.protobuf.Empty")
|
||||
}
|
||||
|
||||
func init() { proto.RegisterFile("google/protobuf/empty.proto", fileDescriptor_900544acb223d5b8) }
|
||||
|
||||
var fileDescriptor_900544acb223d5b8 = []byte{
|
||||
// 176 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4e, 0xcf, 0xcf, 0x4f,
|
||||
0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcd, 0x2d, 0x28,
|
||||
0xa9, 0xd4, 0x03, 0x73, 0x85, 0xf8, 0x21, 0x92, 0x7a, 0x30, 0x49, 0x25, 0x76, 0x2e, 0x56, 0x57,
|
||||
0x90, 0xbc, 0x53, 0x0b, 0xe3, 0x8d, 0x87, 0x72, 0x0c, 0x1f, 0x1e, 0xca, 0x31, 0xfe, 0x78, 0x28,
|
||||
0xc7, 0xd8, 0xf0, 0x48, 0x8e, 0x71, 0xc5, 0x23, 0x39, 0xc6, 0x13, 0x8f, 0xe4, 0x18, 0x2f, 0x3c,
|
||||
0x92, 0x63, 0x7c, 0xf0, 0x48, 0x8e, 0xf1, 0xc5, 0x23, 0x39, 0x86, 0x0f, 0x20, 0xf1, 0xc7, 0x72,
|
||||
0x8c, 0x27, 0x1e, 0xcb, 0x31, 0x72, 0x09, 0x27, 0xe7, 0xe7, 0xea, 0xa1, 0x19, 0xe8, 0xc4, 0x05,
|
||||
0x36, 0x2e, 0x00, 0xc4, 0x0d, 0x60, 0x8c, 0x62, 0x2d, 0xa9, 0x2c, 0x48, 0x2d, 0xfe, 0xc1, 0xc8,
|
||||
0xb8, 0x88, 0x89, 0xd9, 0x3d, 0xc0, 0x69, 0x15, 0x93, 0x9c, 0x3b, 0x44, 0x7d, 0x00, 0x54, 0xbd,
|
||||
0x5e, 0x78, 0x6a, 0x4e, 0x8e, 0x77, 0x5e, 0x7e, 0x79, 0x5e, 0x08, 0x48, 0x65, 0x12, 0x1b, 0xd8,
|
||||
0x20, 0x63, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0x21, 0xbe, 0xb6, 0x31, 0xc6, 0x00, 0x00, 0x00,
|
||||
}
|
||||
|
||||
func (this *Empty) Compare(that interface{}) int {
|
||||
if that == nil {
|
||||
if this == nil {
|
||||
return 0
|
||||
}
|
||||
return 1
|
||||
}
|
||||
|
||||
that1, ok := that.(*Empty)
|
||||
if !ok {
|
||||
that2, ok := that.(Empty)
|
||||
if ok {
|
||||
that1 = &that2
|
||||
} else {
|
||||
return 1
|
||||
}
|
||||
}
|
||||
if that1 == nil {
|
||||
if this == nil {
|
||||
return 0
|
||||
}
|
||||
return 1
|
||||
} else if this == nil {
|
||||
return -1
|
||||
}
|
||||
if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 {
|
||||
return c
|
||||
}
|
||||
return 0
|
||||
}
|
||||
func (this *Empty) Equal(that interface{}) bool {
|
||||
if that == nil {
|
||||
return this == nil
|
||||
}
|
||||
|
||||
that1, ok := that.(*Empty)
|
||||
if !ok {
|
||||
that2, ok := that.(Empty)
|
||||
if ok {
|
||||
that1 = &that2
|
||||
} else {
|
||||
return false
|
||||
}
|
||||
}
|
||||
if that1 == nil {
|
||||
return this == nil
|
||||
} else if this == nil {
|
||||
return false
|
||||
}
|
||||
if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
func (this *Empty) GoString() string {
|
||||
if this == nil {
|
||||
return "nil"
|
||||
}
|
||||
s := make([]string, 0, 4)
|
||||
s = append(s, "&types.Empty{")
|
||||
if this.XXX_unrecognized != nil {
|
||||
s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
|
||||
}
|
||||
s = append(s, "}")
|
||||
return strings.Join(s, "")
|
||||
}
|
||||
func valueToGoStringEmpty(v interface{}, typ string) string {
|
||||
rv := reflect.ValueOf(v)
|
||||
if rv.IsNil() {
|
||||
return "nil"
|
||||
}
|
||||
pv := reflect.Indirect(rv).Interface()
|
||||
return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
|
||||
}
|
||||
func (m *Empty) Marshal() (dAtA []byte, err error) {
|
||||
size := m.Size()
|
||||
dAtA = make([]byte, size)
|
||||
n, err := m.MarshalToSizedBuffer(dAtA[:size])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return dAtA[:n], nil
|
||||
}
|
||||
|
||||
func (m *Empty) MarshalTo(dAtA []byte) (int, error) {
|
||||
size := m.Size()
|
||||
return m.MarshalToSizedBuffer(dAtA[:size])
|
||||
}
|
||||
|
||||
func (m *Empty) MarshalToSizedBuffer(dAtA []byte) (int, error) {
|
||||
i := len(dAtA)
|
||||
_ = i
|
||||
var l int
|
||||
_ = l
|
||||
if m.XXX_unrecognized != nil {
|
||||
i -= len(m.XXX_unrecognized)
|
||||
copy(dAtA[i:], m.XXX_unrecognized)
|
||||
}
|
||||
return len(dAtA) - i, nil
|
||||
}
|
||||
|
||||
func encodeVarintEmpty(dAtA []byte, offset int, v uint64) int {
|
||||
offset -= sovEmpty(v)
|
||||
base := offset
|
||||
for v >= 1<<7 {
|
||||
dAtA[offset] = uint8(v&0x7f | 0x80)
|
||||
v >>= 7
|
||||
offset++
|
||||
}
|
||||
dAtA[offset] = uint8(v)
|
||||
return base
|
||||
}
|
||||
func NewPopulatedEmpty(r randyEmpty, easy bool) *Empty {
|
||||
this := &Empty{}
|
||||
if !easy && r.Intn(10) != 0 {
|
||||
this.XXX_unrecognized = randUnrecognizedEmpty(r, 1)
|
||||
}
|
||||
return this
|
||||
}
|
||||
|
||||
type randyEmpty interface {
|
||||
Float32() float32
|
||||
Float64() float64
|
||||
Int63() int64
|
||||
Int31() int32
|
||||
Uint32() uint32
|
||||
Intn(n int) int
|
||||
}
|
||||
|
||||
func randUTF8RuneEmpty(r randyEmpty) rune {
|
||||
ru := r.Intn(62)
|
||||
if ru < 10 {
|
||||
return rune(ru + 48)
|
||||
} else if ru < 36 {
|
||||
return rune(ru + 55)
|
||||
}
|
||||
return rune(ru + 61)
|
||||
}
|
||||
func randStringEmpty(r randyEmpty) string {
|
||||
v1 := r.Intn(100)
|
||||
tmps := make([]rune, v1)
|
||||
for i := 0; i < v1; i++ {
|
||||
tmps[i] = randUTF8RuneEmpty(r)
|
||||
}
|
||||
return string(tmps)
|
||||
}
|
||||
func randUnrecognizedEmpty(r randyEmpty, maxFieldNumber int) (dAtA []byte) {
|
||||
l := r.Intn(5)
|
||||
for i := 0; i < l; i++ {
|
||||
wire := r.Intn(4)
|
||||
if wire == 3 {
|
||||
wire = 5
|
||||
}
|
||||
fieldNumber := maxFieldNumber + r.Intn(100)
|
||||
dAtA = randFieldEmpty(dAtA, r, fieldNumber, wire)
|
||||
}
|
||||
return dAtA
|
||||
}
|
||||
func randFieldEmpty(dAtA []byte, r randyEmpty, fieldNumber int, wire int) []byte {
|
||||
key := uint32(fieldNumber)<<3 | uint32(wire)
|
||||
switch wire {
|
||||
case 0:
|
||||
dAtA = encodeVarintPopulateEmpty(dAtA, uint64(key))
|
||||
v2 := r.Int63()
|
||||
if r.Intn(2) == 0 {
|
||||
v2 *= -1
|
||||
}
|
||||
dAtA = encodeVarintPopulateEmpty(dAtA, uint64(v2))
|
||||
case 1:
|
||||
dAtA = encodeVarintPopulateEmpty(dAtA, uint64(key))
|
||||
dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)))
|
||||
case 2:
|
||||
dAtA = encodeVarintPopulateEmpty(dAtA, uint64(key))
|
||||
ll := r.Intn(100)
|
||||
dAtA = encodeVarintPopulateEmpty(dAtA, uint64(ll))
|
||||
for j := 0; j < ll; j++ {
|
||||
dAtA = append(dAtA, byte(r.Intn(256)))
|
||||
}
|
||||
default:
|
||||
dAtA = encodeVarintPopulateEmpty(dAtA, uint64(key))
|
||||
dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)))
|
||||
}
|
||||
return dAtA
|
||||
}
|
||||
func encodeVarintPopulateEmpty(dAtA []byte, v uint64) []byte {
|
||||
for v >= 1<<7 {
|
||||
dAtA = append(dAtA, uint8(uint64(v)&0x7f|0x80))
|
||||
v >>= 7
|
||||
}
|
||||
dAtA = append(dAtA, uint8(v))
|
||||
return dAtA
|
||||
}
|
||||
func (m *Empty) Size() (n int) {
|
||||
if m == nil {
|
||||
return 0
|
||||
}
|
||||
var l int
|
||||
_ = l
|
||||
if m.XXX_unrecognized != nil {
|
||||
n += len(m.XXX_unrecognized)
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func sovEmpty(x uint64) (n int) {
|
||||
return (math_bits.Len64(x|1) + 6) / 7
|
||||
}
|
||||
func sozEmpty(x uint64) (n int) {
|
||||
return sovEmpty(uint64((x << 1) ^ uint64((int64(x) >> 63))))
|
||||
}
|
||||
func (this *Empty) String() string {
|
||||
if this == nil {
|
||||
return "nil"
|
||||
}
|
||||
s := strings.Join([]string{`&Empty{`,
|
||||
`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
|
||||
`}`,
|
||||
}, "")
|
||||
return s
|
||||
}
|
||||
func valueToStringEmpty(v interface{}) string {
|
||||
rv := reflect.ValueOf(v)
|
||||
if rv.IsNil() {
|
||||
return "nil"
|
||||
}
|
||||
pv := reflect.Indirect(rv).Interface()
|
||||
return fmt.Sprintf("*%v", pv)
|
||||
}
|
||||
func (m *Empty) Unmarshal(dAtA []byte) error {
|
||||
l := len(dAtA)
|
||||
iNdEx := 0
|
||||
for iNdEx < l {
|
||||
preIndex := iNdEx
|
||||
var wire uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowEmpty
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
fieldNum := int32(wire >> 3)
|
||||
wireType := int(wire & 0x7)
|
||||
if wireType == 4 {
|
||||
return fmt.Errorf("proto: Empty: wiretype end group for non-group")
|
||||
}
|
||||
if fieldNum <= 0 {
|
||||
return fmt.Errorf("proto: Empty: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||
}
|
||||
switch fieldNum {
|
||||
default:
|
||||
iNdEx = preIndex
|
||||
skippy, err := skipEmpty(dAtA[iNdEx:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if skippy < 0 {
|
||||
return ErrInvalidLengthEmpty
|
||||
}
|
||||
if (iNdEx + skippy) < 0 {
|
||||
return ErrInvalidLengthEmpty
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
|
||||
iNdEx += skippy
|
||||
}
|
||||
}
|
||||
|
||||
if iNdEx > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func skipEmpty(dAtA []byte) (n int, err error) {
|
||||
l := len(dAtA)
|
||||
iNdEx := 0
|
||||
depth := 0
|
||||
for iNdEx < l {
|
||||
var wire uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return 0, ErrIntOverflowEmpty
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
wireType := int(wire & 0x7)
|
||||
switch wireType {
|
||||
case 0:
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return 0, ErrIntOverflowEmpty
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
iNdEx++
|
||||
if dAtA[iNdEx-1] < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
case 1:
|
||||
iNdEx += 8
|
||||
case 2:
|
||||
var length int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return 0, ErrIntOverflowEmpty
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
length |= (int(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if length < 0 {
|
||||
return 0, ErrInvalidLengthEmpty
|
||||
}
|
||||
iNdEx += length
|
||||
case 3:
|
||||
depth++
|
||||
case 4:
|
||||
if depth == 0 {
|
||||
return 0, ErrUnexpectedEndOfGroupEmpty
|
||||
}
|
||||
depth--
|
||||
case 5:
|
||||
iNdEx += 4
|
||||
default:
|
||||
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
|
||||
}
|
||||
if iNdEx < 0 {
|
||||
return 0, ErrInvalidLengthEmpty
|
||||
}
|
||||
if depth == 0 {
|
||||
return iNdEx, nil
|
||||
}
|
||||
}
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
|
||||
var (
|
||||
ErrInvalidLengthEmpty = fmt.Errorf("proto: negative length found during unmarshaling")
|
||||
ErrIntOverflowEmpty = fmt.Errorf("proto: integer overflow")
|
||||
ErrUnexpectedEndOfGroupEmpty = fmt.Errorf("proto: unexpected end of group")
|
||||
)
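As the generated comment above notes, Empty is the conventional placeholder request/response message. A tiny illustrative sketch (not part of this diff) showing that it carries no fields and encodes to zero bytes:

package main

import (
    "fmt"

    "github.com/gogo/protobuf/types"
)

func main() {
    e := &types.Empty{}
    b, err := e.Marshal()
    if err != nil {
        panic(err)
    }
    fmt.Println(len(b)) // 0: Empty has no fields on the wire
}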
vendor/github.com/gogo/protobuf/types/field_mask.pb.go (generated, vendored, 741 lines removed)
@@ -1,741 +0,0 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
|
||||
// source: google/protobuf/field_mask.proto
|
||||
|
||||
package types
|
||||
|
||||
import (
|
||||
bytes "bytes"
|
||||
fmt "fmt"
|
||||
proto "github.com/gogo/protobuf/proto"
|
||||
io "io"
|
||||
math "math"
|
||||
math_bits "math/bits"
|
||||
reflect "reflect"
|
||||
strings "strings"
|
||||
)
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
|
||||
|
||||
// `FieldMask` represents a set of symbolic field paths, for example:
|
||||
//
|
||||
// paths: "f.a"
|
||||
// paths: "f.b.d"
|
||||
//
|
||||
// Here `f` represents a field in some root message, `a` and `b`
|
||||
// fields in the message found in `f`, and `d` a field found in the
|
||||
// message in `f.b`.
|
||||
//
|
||||
// Field masks are used to specify a subset of fields that should be
|
||||
// returned by a get operation or modified by an update operation.
|
||||
// Field masks also have a custom JSON encoding (see below).
|
||||
//
|
||||
// # Field Masks in Projections
|
||||
//
|
||||
// When used in the context of a projection, a response message or
|
||||
// sub-message is filtered by the API to only contain those fields as
|
||||
// specified in the mask. For example, if the mask in the previous
|
||||
// example is applied to a response message as follows:
|
||||
//
|
||||
// f {
|
||||
// a : 22
|
||||
// b {
|
||||
// d : 1
|
||||
// x : 2
|
||||
// }
|
||||
// y : 13
|
||||
// }
|
||||
// z: 8
|
||||
//
|
||||
// The result will not contain specific values for fields x,y and z
|
||||
// (their value will be set to the default, and omitted in proto text
|
||||
// output):
|
||||
//
|
||||
//
|
||||
// f {
|
||||
// a : 22
|
||||
// b {
|
||||
// d : 1
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// A repeated field is not allowed except at the last position of a
|
||||
// paths string.
|
||||
//
|
||||
// If a FieldMask object is not present in a get operation, the
|
||||
// operation applies to all fields (as if a FieldMask of all fields
|
||||
// had been specified).
|
||||
//
|
||||
// Note that a field mask does not necessarily apply to the
|
||||
// top-level response message. In case of a REST get operation, the
|
||||
// field mask applies directly to the response, but in case of a REST
|
||||
// list operation, the mask instead applies to each individual message
|
||||
// in the returned resource list. In case of a REST custom method,
|
||||
// other definitions may be used. Where the mask applies will be
|
||||
// clearly documented together with its declaration in the API. In
|
||||
// any case, the effect on the returned resource/resources is required
|
||||
// behavior for APIs.
|
||||
//
|
||||
// # Field Masks in Update Operations
|
||||
//
|
||||
// A field mask in update operations specifies which fields of the
|
||||
// targeted resource are going to be updated. The API is required
|
||||
// to only change the values of the fields as specified in the mask
|
||||
// and leave the others untouched. If a resource is passed in to
|
||||
// describe the updated values, the API ignores the values of all
|
||||
// fields not covered by the mask.
|
||||
//
|
||||
// If a repeated field is specified for an update operation, new values will
|
||||
// be appended to the existing repeated field in the target resource. Note that
|
||||
// a repeated field is only allowed in the last position of a `paths` string.
|
||||
//
|
||||
// If a sub-message is specified in the last position of the field mask for an
|
||||
// update operation, then new value will be merged into the existing sub-message
|
||||
// in the target resource.
|
||||
//
|
||||
// For example, given the target message:
|
||||
//
|
||||
// f {
|
||||
// b {
|
||||
// d: 1
|
||||
// x: 2
|
||||
// }
|
||||
// c: [1]
|
||||
// }
|
||||
//
|
||||
// And an update message:
|
||||
//
|
||||
// f {
|
||||
// b {
|
||||
// d: 10
|
||||
// }
|
||||
// c: [2]
|
||||
// }
|
||||
//
|
||||
// then if the field mask is:
|
||||
//
|
||||
// paths: ["f.b", "f.c"]
|
||||
//
|
||||
// then the result will be:
|
||||
//
|
||||
// f {
|
||||
// b {
|
||||
// d: 10
|
||||
// x: 2
|
||||
// }
|
||||
// c: [1, 2]
|
||||
// }
|
||||
//
|
||||
// An implementation may provide options to override this default behavior for
|
||||
// repeated and message fields.
|
||||
//
|
||||
// In order to reset a field's value to the default, the field must
|
||||
// be in the mask and set to the default value in the provided resource.
|
||||
// Hence, in order to reset all fields of a resource, provide a default
|
||||
// instance of the resource and set all fields in the mask, or do
|
||||
// not provide a mask as described below.
|
||||
//
|
||||
// If a field mask is not present on update, the operation applies to
|
||||
// all fields (as if a field mask of all fields has been specified).
|
||||
// Note that in the presence of schema evolution, this may mean that
|
||||
// fields the client does not know and has therefore not filled into
|
||||
// the request will be reset to their default. If this is unwanted
|
||||
// behavior, a specific service may require a client to always specify
|
||||
// a field mask, producing an error if not.
|
||||
//
|
||||
// As with get operations, the location of the resource which
|
||||
// describes the updated values in the request message depends on the
|
||||
// operation kind. In any case, the effect of the field mask is
|
||||
// required to be honored by the API.
|
||||
//
|
||||
// ## Considerations for HTTP REST
|
||||
//
|
||||
// The HTTP kind of an update operation which uses a field mask must
|
||||
// be set to PATCH instead of PUT in order to satisfy HTTP semantics
|
||||
// (PUT must only be used for full updates).
|
||||
//
|
||||
// # JSON Encoding of Field Masks
|
||||
//
|
||||
// In JSON, a field mask is encoded as a single string where paths are
|
||||
// separated by a comma. Fields name in each path are converted
|
||||
// to/from lower-camel naming conventions.
|
||||
//
|
||||
// As an example, consider the following message declarations:
|
||||
//
|
||||
// message Profile {
|
||||
// User user = 1;
|
||||
// Photo photo = 2;
|
||||
// }
|
||||
// message User {
|
||||
// string display_name = 1;
|
||||
// string address = 2;
|
||||
// }
|
||||
//
|
||||
// In proto a field mask for `Profile` may look as such:
|
||||
//
|
||||
// mask {
|
||||
// paths: "user.display_name"
|
||||
// paths: "photo"
|
||||
// }
|
||||
//
|
||||
// In JSON, the same mask is represented as below:
|
||||
//
|
||||
// {
|
||||
// mask: "user.displayName,photo"
|
||||
// }
|
||||
//
|
||||
// # Field Masks and Oneof Fields
|
||||
//
|
||||
// Field masks treat fields in oneofs just as regular fields. Consider the
|
||||
// following message:
|
||||
//
|
||||
// message SampleMessage {
|
||||
// oneof test_oneof {
|
||||
// string name = 4;
|
||||
// SubMessage sub_message = 9;
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// The field mask can be:
|
||||
//
|
||||
// mask {
|
||||
// paths: "name"
|
||||
// }
|
||||
//
|
||||
// Or:
|
||||
//
|
||||
// mask {
|
||||
// paths: "sub_message"
|
||||
// }
|
||||
//
|
||||
// Note that oneof type names ("test_oneof" in this case) cannot be used in
|
||||
// paths.
|
||||
//
|
||||
// ## Field Mask Verification
|
||||
//
|
||||
// The implementation of any API method which has a FieldMask type field in the
|
||||
// request should verify the included field paths, and return an
|
||||
// `INVALID_ARGUMENT` error if any path is duplicated or unmappable.
|
||||
type FieldMask struct {
|
||||
// The set of field mask paths.
|
||||
Paths []string `protobuf:"bytes,1,rep,name=paths,proto3" json:"paths,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *FieldMask) Reset() { *m = FieldMask{} }
|
||||
func (*FieldMask) ProtoMessage() {}
|
||||
func (*FieldMask) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_5158202634f0da48, []int{0}
|
||||
}
|
||||
func (m *FieldMask) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
}
|
||||
func (m *FieldMask) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
if deterministic {
|
||||
return xxx_messageInfo_FieldMask.Marshal(b, m, deterministic)
|
||||
} else {
|
||||
b = b[:cap(b)]
|
||||
n, err := m.MarshalToSizedBuffer(b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return b[:n], nil
|
||||
}
|
||||
}
|
||||
func (m *FieldMask) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_FieldMask.Merge(m, src)
|
||||
}
|
||||
func (m *FieldMask) XXX_Size() int {
|
||||
return m.Size()
|
||||
}
|
||||
func (m *FieldMask) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_FieldMask.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_FieldMask proto.InternalMessageInfo
|
||||
|
||||
func (m *FieldMask) GetPaths() []string {
|
||||
if m != nil {
|
||||
return m.Paths
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (*FieldMask) XXX_MessageName() string {
|
||||
return "google.protobuf.FieldMask"
|
||||
}
|
||||
func init() {
|
||||
proto.RegisterType((*FieldMask)(nil), "google.protobuf.FieldMask")
|
||||
}
|
||||
|
||||
func init() { proto.RegisterFile("google/protobuf/field_mask.proto", fileDescriptor_5158202634f0da48) }
|
||||
|
||||
var fileDescriptor_5158202634f0da48 = []byte{
|
||||
// 203 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x48, 0xcf, 0xcf, 0x4f,
|
||||
0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcb, 0x4c, 0xcd,
|
||||
0x49, 0x89, 0xcf, 0x4d, 0x2c, 0xce, 0xd6, 0x03, 0x8b, 0x09, 0xf1, 0x43, 0x54, 0xe8, 0xc1, 0x54,
|
||||
0x28, 0x29, 0x72, 0x71, 0xba, 0x81, 0x14, 0xf9, 0x26, 0x16, 0x67, 0x0b, 0x89, 0x70, 0xb1, 0x16,
|
||||
0x24, 0x96, 0x64, 0x14, 0x4b, 0x30, 0x2a, 0x30, 0x6b, 0x70, 0x06, 0x41, 0x38, 0x4e, 0x1d, 0x8c,
|
||||
0x37, 0x1e, 0xca, 0x31, 0x7c, 0x78, 0x28, 0xc7, 0xf8, 0xe3, 0xa1, 0x1c, 0x63, 0xc3, 0x23, 0x39,
|
||||
0xc6, 0x15, 0x8f, 0xe4, 0x18, 0x4f, 0x3c, 0x92, 0x63, 0xbc, 0xf0, 0x48, 0x8e, 0xf1, 0xc1, 0x23,
|
||||
0x39, 0xc6, 0x17, 0x8f, 0xe4, 0x18, 0x3e, 0x80, 0xc4, 0x1f, 0xcb, 0x31, 0x9e, 0x78, 0x2c, 0xc7,
|
||||
0xc8, 0x25, 0x9c, 0x9c, 0x9f, 0xab, 0x87, 0x66, 0x95, 0x13, 0x1f, 0xdc, 0xa2, 0x00, 0x90, 0x50,
|
||||
0x00, 0x63, 0x14, 0x6b, 0x49, 0x65, 0x41, 0x6a, 0xf1, 0x0f, 0x46, 0xc6, 0x45, 0x4c, 0xcc, 0xee,
|
||||
0x01, 0x4e, 0xab, 0x98, 0xe4, 0xdc, 0x21, 0x7a, 0x02, 0xa0, 0x7a, 0xf4, 0xc2, 0x53, 0x73, 0x72,
|
||||
0xbc, 0xf3, 0xf2, 0xcb, 0xf3, 0x42, 0x40, 0x2a, 0x93, 0xd8, 0xc0, 0x86, 0x19, 0x03, 0x02, 0x00,
|
||||
0x00, 0xff, 0xff, 0x43, 0xa0, 0x83, 0xd0, 0xe9, 0x00, 0x00, 0x00,
|
||||
}
|
||||
|
||||
func (this *FieldMask) Compare(that interface{}) int {
|
||||
if that == nil {
|
||||
if this == nil {
|
||||
return 0
|
||||
}
|
||||
return 1
|
||||
}
|
||||
|
||||
that1, ok := that.(*FieldMask)
|
||||
if !ok {
|
||||
that2, ok := that.(FieldMask)
|
||||
if ok {
|
||||
that1 = &that2
|
||||
} else {
|
||||
return 1
|
||||
}
|
||||
}
|
||||
if that1 == nil {
|
||||
if this == nil {
|
||||
return 0
|
||||
}
|
||||
return 1
|
||||
} else if this == nil {
|
||||
return -1
|
||||
}
|
||||
if len(this.Paths) != len(that1.Paths) {
|
||||
if len(this.Paths) < len(that1.Paths) {
|
||||
return -1
|
||||
}
|
||||
return 1
|
||||
}
|
||||
for i := range this.Paths {
|
||||
if this.Paths[i] != that1.Paths[i] {
|
||||
if this.Paths[i] < that1.Paths[i] {
|
||||
return -1
|
||||
}
|
||||
return 1
|
||||
}
|
||||
}
|
||||
if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 {
|
||||
return c
|
||||
}
|
||||
return 0
|
||||
}
|
||||
func (this *FieldMask) Equal(that interface{}) bool {
|
||||
if that == nil {
|
||||
return this == nil
|
||||
}
|
||||
|
||||
that1, ok := that.(*FieldMask)
|
||||
if !ok {
|
||||
that2, ok := that.(FieldMask)
|
||||
if ok {
|
||||
that1 = &that2
|
||||
} else {
|
||||
return false
|
||||
}
|
||||
}
|
||||
if that1 == nil {
|
||||
return this == nil
|
||||
} else if this == nil {
|
||||
return false
|
||||
}
|
||||
if len(this.Paths) != len(that1.Paths) {
|
||||
return false
|
||||
}
|
||||
for i := range this.Paths {
|
||||
if this.Paths[i] != that1.Paths[i] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
func (this *FieldMask) GoString() string {
|
||||
if this == nil {
|
||||
return "nil"
|
||||
}
|
||||
s := make([]string, 0, 5)
|
||||
s = append(s, "&types.FieldMask{")
|
||||
s = append(s, "Paths: "+fmt.Sprintf("%#v", this.Paths)+",\n")
|
||||
if this.XXX_unrecognized != nil {
|
||||
s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
|
||||
}
|
||||
s = append(s, "}")
|
||||
return strings.Join(s, "")
|
||||
}
|
||||
func valueToGoStringFieldMask(v interface{}, typ string) string {
|
||||
rv := reflect.ValueOf(v)
|
||||
if rv.IsNil() {
|
||||
return "nil"
|
||||
}
|
||||
pv := reflect.Indirect(rv).Interface()
|
||||
return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
|
||||
}
|
||||
func (m *FieldMask) Marshal() (dAtA []byte, err error) {
|
||||
size := m.Size()
|
||||
dAtA = make([]byte, size)
|
||||
n, err := m.MarshalToSizedBuffer(dAtA[:size])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return dAtA[:n], nil
|
||||
}
|
||||
|
||||
func (m *FieldMask) MarshalTo(dAtA []byte) (int, error) {
|
||||
size := m.Size()
|
||||
return m.MarshalToSizedBuffer(dAtA[:size])
|
||||
}
|
||||
|
||||
func (m *FieldMask) MarshalToSizedBuffer(dAtA []byte) (int, error) {
|
||||
i := len(dAtA)
|
||||
_ = i
|
||||
var l int
|
||||
_ = l
|
||||
if m.XXX_unrecognized != nil {
|
||||
i -= len(m.XXX_unrecognized)
|
||||
copy(dAtA[i:], m.XXX_unrecognized)
|
||||
}
|
||||
if len(m.Paths) > 0 {
|
||||
for iNdEx := len(m.Paths) - 1; iNdEx >= 0; iNdEx-- {
|
||||
i -= len(m.Paths[iNdEx])
|
||||
copy(dAtA[i:], m.Paths[iNdEx])
|
||||
i = encodeVarintFieldMask(dAtA, i, uint64(len(m.Paths[iNdEx])))
|
||||
i--
|
||||
dAtA[i] = 0xa
|
||||
}
|
||||
}
|
||||
return len(dAtA) - i, nil
|
||||
}
|
||||
|
||||
func encodeVarintFieldMask(dAtA []byte, offset int, v uint64) int {
|
||||
offset -= sovFieldMask(v)
|
||||
base := offset
|
||||
for v >= 1<<7 {
|
||||
dAtA[offset] = uint8(v&0x7f | 0x80)
|
||||
v >>= 7
|
||||
offset++
|
||||
}
|
||||
dAtA[offset] = uint8(v)
|
||||
return base
|
||||
}
|
||||
func NewPopulatedFieldMask(r randyFieldMask, easy bool) *FieldMask {
|
||||
this := &FieldMask{}
|
||||
v1 := r.Intn(10)
|
||||
this.Paths = make([]string, v1)
|
||||
for i := 0; i < v1; i++ {
|
||||
this.Paths[i] = string(randStringFieldMask(r))
|
||||
}
|
||||
if !easy && r.Intn(10) != 0 {
|
||||
this.XXX_unrecognized = randUnrecognizedFieldMask(r, 2)
|
||||
}
|
||||
return this
|
||||
}
|
||||
|
||||
type randyFieldMask interface {
|
||||
Float32() float32
|
||||
Float64() float64
|
||||
Int63() int64
|
||||
Int31() int32
|
||||
Uint32() uint32
|
||||
Intn(n int) int
|
||||
}
|
||||
|
||||
func randUTF8RuneFieldMask(r randyFieldMask) rune {
|
||||
ru := r.Intn(62)
|
||||
if ru < 10 {
|
||||
return rune(ru + 48)
|
||||
} else if ru < 36 {
|
||||
return rune(ru + 55)
|
||||
}
|
||||
return rune(ru + 61)
|
||||
}
|
||||
func randStringFieldMask(r randyFieldMask) string {
|
||||
v2 := r.Intn(100)
|
||||
tmps := make([]rune, v2)
|
||||
for i := 0; i < v2; i++ {
|
||||
tmps[i] = randUTF8RuneFieldMask(r)
|
||||
}
|
||||
return string(tmps)
|
||||
}
|
||||
func randUnrecognizedFieldMask(r randyFieldMask, maxFieldNumber int) (dAtA []byte) {
|
||||
l := r.Intn(5)
|
||||
for i := 0; i < l; i++ {
|
||||
wire := r.Intn(4)
|
||||
if wire == 3 {
|
||||
wire = 5
|
||||
}
|
||||
fieldNumber := maxFieldNumber + r.Intn(100)
|
||||
dAtA = randFieldFieldMask(dAtA, r, fieldNumber, wire)
|
||||
}
|
||||
return dAtA
|
||||
}
|
||||
func randFieldFieldMask(dAtA []byte, r randyFieldMask, fieldNumber int, wire int) []byte {
|
||||
key := uint32(fieldNumber)<<3 | uint32(wire)
|
||||
switch wire {
|
||||
case 0:
|
||||
dAtA = encodeVarintPopulateFieldMask(dAtA, uint64(key))
|
||||
v3 := r.Int63()
|
||||
if r.Intn(2) == 0 {
|
||||
v3 *= -1
|
||||
}
|
||||
dAtA = encodeVarintPopulateFieldMask(dAtA, uint64(v3))
|
||||
case 1:
|
||||
dAtA = encodeVarintPopulateFieldMask(dAtA, uint64(key))
|
||||
dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)))
|
||||
case 2:
|
||||
dAtA = encodeVarintPopulateFieldMask(dAtA, uint64(key))
|
||||
ll := r.Intn(100)
|
||||
dAtA = encodeVarintPopulateFieldMask(dAtA, uint64(ll))
|
||||
for j := 0; j < ll; j++ {
|
||||
dAtA = append(dAtA, byte(r.Intn(256)))
|
||||
}
|
||||
default:
|
||||
dAtA = encodeVarintPopulateFieldMask(dAtA, uint64(key))
|
||||
dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)))
|
||||
}
|
||||
return dAtA
|
||||
}
|
||||
func encodeVarintPopulateFieldMask(dAtA []byte, v uint64) []byte {
|
||||
for v >= 1<<7 {
|
||||
dAtA = append(dAtA, uint8(uint64(v)&0x7f|0x80))
|
||||
v >>= 7
|
||||
}
|
||||
dAtA = append(dAtA, uint8(v))
|
||||
return dAtA
|
||||
}
|
||||
func (m *FieldMask) Size() (n int) {
|
||||
if m == nil {
|
||||
return 0
|
||||
}
|
||||
var l int
|
||||
_ = l
|
||||
if len(m.Paths) > 0 {
|
||||
for _, s := range m.Paths {
|
||||
l = len(s)
|
||||
n += 1 + l + sovFieldMask(uint64(l))
|
||||
}
|
||||
}
|
||||
if m.XXX_unrecognized != nil {
|
||||
n += len(m.XXX_unrecognized)
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func sovFieldMask(x uint64) (n int) {
|
||||
return (math_bits.Len64(x|1) + 6) / 7
|
||||
}
|
||||
func sozFieldMask(x uint64) (n int) {
|
||||
return sovFieldMask(uint64((x << 1) ^ uint64((int64(x) >> 63))))
|
||||
}
|
||||
func (this *FieldMask) String() string {
|
||||
if this == nil {
|
||||
return "nil"
|
||||
}
|
||||
s := strings.Join([]string{`&FieldMask{`,
|
||||
`Paths:` + fmt.Sprintf("%v", this.Paths) + `,`,
|
||||
`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
|
||||
`}`,
|
||||
}, "")
|
||||
return s
|
||||
}
|
||||
func valueToStringFieldMask(v interface{}) string {
|
||||
rv := reflect.ValueOf(v)
|
||||
if rv.IsNil() {
|
||||
return "nil"
|
||||
}
|
||||
pv := reflect.Indirect(rv).Interface()
|
||||
return fmt.Sprintf("*%v", pv)
|
||||
}
|
||||
func (m *FieldMask) Unmarshal(dAtA []byte) error {
|
||||
l := len(dAtA)
|
||||
iNdEx := 0
|
||||
for iNdEx < l {
|
||||
preIndex := iNdEx
|
||||
var wire uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowFieldMask
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
fieldNum := int32(wire >> 3)
|
||||
wireType := int(wire & 0x7)
|
||||
if wireType == 4 {
|
||||
return fmt.Errorf("proto: FieldMask: wiretype end group for non-group")
|
||||
}
|
||||
if fieldNum <= 0 {
|
||||
return fmt.Errorf("proto: FieldMask: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||
}
|
||||
switch fieldNum {
|
||||
case 1:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Paths", wireType)
|
||||
}
|
||||
var stringLen uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowFieldMask
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
intStringLen := int(stringLen)
|
||||
if intStringLen < 0 {
|
||||
return ErrInvalidLengthFieldMask
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthFieldMask
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.Paths = append(m.Paths, string(dAtA[iNdEx:postIndex]))
|
||||
iNdEx = postIndex
|
||||
default:
|
||||
iNdEx = preIndex
|
||||
skippy, err := skipFieldMask(dAtA[iNdEx:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if skippy < 0 {
|
||||
return ErrInvalidLengthFieldMask
|
||||
}
|
||||
if (iNdEx + skippy) < 0 {
|
||||
return ErrInvalidLengthFieldMask
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
|
||||
iNdEx += skippy
|
||||
}
|
||||
}
|
||||
|
||||
if iNdEx > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func skipFieldMask(dAtA []byte) (n int, err error) {
|
||||
l := len(dAtA)
|
||||
iNdEx := 0
|
||||
depth := 0
|
||||
for iNdEx < l {
|
||||
var wire uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return 0, ErrIntOverflowFieldMask
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
wireType := int(wire & 0x7)
|
||||
switch wireType {
|
||||
case 0:
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return 0, ErrIntOverflowFieldMask
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
iNdEx++
|
||||
if dAtA[iNdEx-1] < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
case 1:
|
||||
iNdEx += 8
|
||||
case 2:
|
||||
var length int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return 0, ErrIntOverflowFieldMask
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
length |= (int(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if length < 0 {
|
||||
return 0, ErrInvalidLengthFieldMask
|
||||
}
|
||||
iNdEx += length
|
||||
case 3:
|
||||
depth++
|
||||
case 4:
|
||||
if depth == 0 {
|
||||
return 0, ErrUnexpectedEndOfGroupFieldMask
|
||||
}
|
||||
depth--
|
||||
case 5:
|
||||
iNdEx += 4
|
||||
default:
|
||||
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
|
||||
}
|
||||
if iNdEx < 0 {
|
||||
return 0, ErrInvalidLengthFieldMask
|
||||
}
|
||||
if depth == 0 {
|
||||
return iNdEx, nil
|
||||
}
|
||||
}
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
|
||||
var (
|
||||
ErrInvalidLengthFieldMask = fmt.Errorf("proto: negative length found during unmarshaling")
|
||||
ErrIntOverflowFieldMask = fmt.Errorf("proto: integer overflow")
|
||||
ErrUnexpectedEndOfGroupFieldMask = fmt.Errorf("proto: unexpected end of group")
|
||||
)
|
vendor/github.com/gogo/protobuf/types/protosize.go (generated, vendored): 34 deletions
@@ -1,34 +0,0 @@
package types

func (m *Any) ProtoSize() (n int) { return m.Size() }
func (m *Api) ProtoSize() (n int) { return m.Size() }
func (m *Method) ProtoSize() (n int) { return m.Size() }
func (m *Mixin) ProtoSize() (n int) { return m.Size() }
func (m *Duration) ProtoSize() (n int) { return m.Size() }
func (m *Empty) ProtoSize() (n int) { return m.Size() }
func (m *FieldMask) ProtoSize() (n int) { return m.Size() }
func (m *SourceContext) ProtoSize() (n int) { return m.Size() }
func (m *Struct) ProtoSize() (n int) { return m.Size() }
func (m *Value) ProtoSize() (n int) { return m.Size() }
func (m *Value_NullValue) ProtoSize() (n int) { return m.Size() }
func (m *Value_NumberValue) ProtoSize() (n int) { return m.Size() }
func (m *Value_StringValue) ProtoSize() (n int) { return m.Size() }
func (m *Value_BoolValue) ProtoSize() (n int) { return m.Size() }
func (m *Value_StructValue) ProtoSize() (n int) { return m.Size() }
func (m *Value_ListValue) ProtoSize() (n int) { return m.Size() }
func (m *ListValue) ProtoSize() (n int) { return m.Size() }
func (m *Timestamp) ProtoSize() (n int) { return m.Size() }
func (m *Type) ProtoSize() (n int) { return m.Size() }
func (m *Field) ProtoSize() (n int) { return m.Size() }
func (m *Enum) ProtoSize() (n int) { return m.Size() }
func (m *EnumValue) ProtoSize() (n int) { return m.Size() }
func (m *Option) ProtoSize() (n int) { return m.Size() }
func (m *DoubleValue) ProtoSize() (n int) { return m.Size() }
func (m *FloatValue) ProtoSize() (n int) { return m.Size() }
func (m *Int64Value) ProtoSize() (n int) { return m.Size() }
func (m *UInt64Value) ProtoSize() (n int) { return m.Size() }
func (m *Int32Value) ProtoSize() (n int) { return m.Size() }
func (m *UInt32Value) ProtoSize() (n int) { return m.Size() }
func (m *BoolValue) ProtoSize() (n int) { return m.Size() }
func (m *StringValue) ProtoSize() (n int) { return m.Size() }
func (m *BytesValue) ProtoSize() (n int) { return m.Size() }
vendor/github.com/gogo/protobuf/types/source_context.pb.go (generated, vendored): 527 deletions
@@ -1,527 +0,0 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
|
||||
// source: google/protobuf/source_context.proto
|
||||
|
||||
package types
|
||||
|
||||
import (
|
||||
bytes "bytes"
|
||||
fmt "fmt"
|
||||
proto "github.com/gogo/protobuf/proto"
|
||||
io "io"
|
||||
math "math"
|
||||
math_bits "math/bits"
|
||||
reflect "reflect"
|
||||
strings "strings"
|
||||
)
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
|
||||
|
||||
// `SourceContext` represents information about the source of a
|
||||
// protobuf element, like the file in which it is defined.
|
||||
type SourceContext struct {
|
||||
// The path-qualified name of the .proto file that contained the associated
|
||||
// protobuf element. For example: `"google/protobuf/source_context.proto"`.
|
||||
FileName string `protobuf:"bytes,1,opt,name=file_name,json=fileName,proto3" json:"file_name,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *SourceContext) Reset() { *m = SourceContext{} }
|
||||
func (*SourceContext) ProtoMessage() {}
|
||||
func (*SourceContext) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_b686cdb126d509db, []int{0}
|
||||
}
|
||||
func (m *SourceContext) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
}
|
||||
func (m *SourceContext) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
if deterministic {
|
||||
return xxx_messageInfo_SourceContext.Marshal(b, m, deterministic)
|
||||
} else {
|
||||
b = b[:cap(b)]
|
||||
n, err := m.MarshalToSizedBuffer(b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return b[:n], nil
|
||||
}
|
||||
}
|
||||
func (m *SourceContext) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_SourceContext.Merge(m, src)
|
||||
}
|
||||
func (m *SourceContext) XXX_Size() int {
|
||||
return m.Size()
|
||||
}
|
||||
func (m *SourceContext) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_SourceContext.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_SourceContext proto.InternalMessageInfo
|
||||
|
||||
func (m *SourceContext) GetFileName() string {
|
||||
if m != nil {
|
||||
return m.FileName
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (*SourceContext) XXX_MessageName() string {
|
||||
return "google.protobuf.SourceContext"
|
||||
}
|
||||
func init() {
|
||||
proto.RegisterType((*SourceContext)(nil), "google.protobuf.SourceContext")
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterFile("google/protobuf/source_context.proto", fileDescriptor_b686cdb126d509db)
|
||||
}
|
||||
|
||||
var fileDescriptor_b686cdb126d509db = []byte{
|
||||
// 212 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x49, 0xcf, 0xcf, 0x4f,
|
||||
0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0xce, 0x2f, 0x2d,
|
||||
0x4a, 0x4e, 0x8d, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xad, 0x28, 0xd1, 0x03, 0x8b, 0x0b, 0xf1, 0x43,
|
||||
0x54, 0xe9, 0xc1, 0x54, 0x29, 0xe9, 0x70, 0xf1, 0x06, 0x83, 0x15, 0x3a, 0x43, 0xd4, 0x09, 0x49,
|
||||
0x73, 0x71, 0xa6, 0x65, 0xe6, 0xa4, 0xc6, 0xe7, 0x25, 0xe6, 0xa6, 0x4a, 0x30, 0x2a, 0x30, 0x6a,
|
||||
0x70, 0x06, 0x71, 0x80, 0x04, 0xfc, 0x12, 0x73, 0x53, 0x9d, 0x3a, 0x19, 0x6f, 0x3c, 0x94, 0x63,
|
||||
0xf8, 0xf0, 0x50, 0x8e, 0xf1, 0xc7, 0x43, 0x39, 0xc6, 0x86, 0x47, 0x72, 0x8c, 0x2b, 0x1e, 0xc9,
|
||||
0x31, 0x9e, 0x78, 0x24, 0xc7, 0x78, 0xe1, 0x91, 0x1c, 0xe3, 0x83, 0x47, 0x72, 0x8c, 0x2f, 0x1e,
|
||||
0xc9, 0x31, 0x7c, 0x00, 0x89, 0x3f, 0x96, 0x63, 0x3c, 0xf1, 0x58, 0x8e, 0x91, 0x4b, 0x38, 0x39,
|
||||
0x3f, 0x57, 0x0f, 0xcd, 0x56, 0x27, 0x21, 0x14, 0x3b, 0x03, 0x40, 0xc2, 0x01, 0x8c, 0x51, 0xac,
|
||||
0x25, 0x95, 0x05, 0xa9, 0xc5, 0x8b, 0x98, 0x98, 0xdd, 0x03, 0x9c, 0x56, 0x31, 0xc9, 0xb9, 0x43,
|
||||
0x34, 0x05, 0x40, 0x35, 0xe9, 0x85, 0xa7, 0xe6, 0xe4, 0x78, 0xe7, 0xe5, 0x97, 0xe7, 0x85, 0x80,
|
||||
0x94, 0x25, 0xb1, 0x81, 0x4d, 0x33, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0xb8, 0x37, 0x2a, 0xa1,
|
||||
0xf9, 0x00, 0x00, 0x00,
|
||||
}
|
||||
|
||||
func (this *SourceContext) Compare(that interface{}) int {
|
||||
if that == nil {
|
||||
if this == nil {
|
||||
return 0
|
||||
}
|
||||
return 1
|
||||
}
|
||||
|
||||
that1, ok := that.(*SourceContext)
|
||||
if !ok {
|
||||
that2, ok := that.(SourceContext)
|
||||
if ok {
|
||||
that1 = &that2
|
||||
} else {
|
||||
return 1
|
||||
}
|
||||
}
|
||||
if that1 == nil {
|
||||
if this == nil {
|
||||
return 0
|
||||
}
|
||||
return 1
|
||||
} else if this == nil {
|
||||
return -1
|
||||
}
|
||||
if this.FileName != that1.FileName {
|
||||
if this.FileName < that1.FileName {
|
||||
return -1
|
||||
}
|
||||
return 1
|
||||
}
|
||||
if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 {
|
||||
return c
|
||||
}
|
||||
return 0
|
||||
}
|
||||
func (this *SourceContext) Equal(that interface{}) bool {
|
||||
if that == nil {
|
||||
return this == nil
|
||||
}
|
||||
|
||||
that1, ok := that.(*SourceContext)
|
||||
if !ok {
|
||||
that2, ok := that.(SourceContext)
|
||||
if ok {
|
||||
that1 = &that2
|
||||
} else {
|
||||
return false
|
||||
}
|
||||
}
|
||||
if that1 == nil {
|
||||
return this == nil
|
||||
} else if this == nil {
|
||||
return false
|
||||
}
|
||||
if this.FileName != that1.FileName {
|
||||
return false
|
||||
}
|
||||
if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
func (this *SourceContext) GoString() string {
|
||||
if this == nil {
|
||||
return "nil"
|
||||
}
|
||||
s := make([]string, 0, 5)
|
||||
s = append(s, "&types.SourceContext{")
|
||||
s = append(s, "FileName: "+fmt.Sprintf("%#v", this.FileName)+",\n")
|
||||
if this.XXX_unrecognized != nil {
|
||||
s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
|
||||
}
|
||||
s = append(s, "}")
|
||||
return strings.Join(s, "")
|
||||
}
|
||||
func valueToGoStringSourceContext(v interface{}, typ string) string {
|
||||
rv := reflect.ValueOf(v)
|
||||
if rv.IsNil() {
|
||||
return "nil"
|
||||
}
|
||||
pv := reflect.Indirect(rv).Interface()
|
||||
return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
|
||||
}
|
||||
func (m *SourceContext) Marshal() (dAtA []byte, err error) {
|
||||
size := m.Size()
|
||||
dAtA = make([]byte, size)
|
||||
n, err := m.MarshalToSizedBuffer(dAtA[:size])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return dAtA[:n], nil
|
||||
}
|
||||
|
||||
func (m *SourceContext) MarshalTo(dAtA []byte) (int, error) {
|
||||
size := m.Size()
|
||||
return m.MarshalToSizedBuffer(dAtA[:size])
|
||||
}
|
||||
|
||||
func (m *SourceContext) MarshalToSizedBuffer(dAtA []byte) (int, error) {
|
||||
i := len(dAtA)
|
||||
_ = i
|
||||
var l int
|
||||
_ = l
|
||||
if m.XXX_unrecognized != nil {
|
||||
i -= len(m.XXX_unrecognized)
|
||||
copy(dAtA[i:], m.XXX_unrecognized)
|
||||
}
|
||||
if len(m.FileName) > 0 {
|
||||
i -= len(m.FileName)
|
||||
copy(dAtA[i:], m.FileName)
|
||||
i = encodeVarintSourceContext(dAtA, i, uint64(len(m.FileName)))
|
||||
i--
|
||||
dAtA[i] = 0xa
|
||||
}
|
||||
return len(dAtA) - i, nil
|
||||
}
|
||||
|
||||
func encodeVarintSourceContext(dAtA []byte, offset int, v uint64) int {
|
||||
offset -= sovSourceContext(v)
|
||||
base := offset
|
||||
for v >= 1<<7 {
|
||||
dAtA[offset] = uint8(v&0x7f | 0x80)
|
||||
v >>= 7
|
||||
offset++
|
||||
}
|
||||
dAtA[offset] = uint8(v)
|
||||
return base
|
||||
}
|
||||
func NewPopulatedSourceContext(r randySourceContext, easy bool) *SourceContext {
|
||||
this := &SourceContext{}
|
||||
this.FileName = string(randStringSourceContext(r))
|
||||
if !easy && r.Intn(10) != 0 {
|
||||
this.XXX_unrecognized = randUnrecognizedSourceContext(r, 2)
|
||||
}
|
||||
return this
|
||||
}
|
||||
|
||||
type randySourceContext interface {
|
||||
Float32() float32
|
||||
Float64() float64
|
||||
Int63() int64
|
||||
Int31() int32
|
||||
Uint32() uint32
|
||||
Intn(n int) int
|
||||
}
|
||||
|
||||
func randUTF8RuneSourceContext(r randySourceContext) rune {
|
||||
ru := r.Intn(62)
|
||||
if ru < 10 {
|
||||
return rune(ru + 48)
|
||||
} else if ru < 36 {
|
||||
return rune(ru + 55)
|
||||
}
|
||||
return rune(ru + 61)
|
||||
}
|
||||
func randStringSourceContext(r randySourceContext) string {
|
||||
v1 := r.Intn(100)
|
||||
tmps := make([]rune, v1)
|
||||
for i := 0; i < v1; i++ {
|
||||
tmps[i] = randUTF8RuneSourceContext(r)
|
||||
}
|
||||
return string(tmps)
|
||||
}
|
||||
func randUnrecognizedSourceContext(r randySourceContext, maxFieldNumber int) (dAtA []byte) {
|
||||
l := r.Intn(5)
|
||||
for i := 0; i < l; i++ {
|
||||
wire := r.Intn(4)
|
||||
if wire == 3 {
|
||||
wire = 5
|
||||
}
|
||||
fieldNumber := maxFieldNumber + r.Intn(100)
|
||||
dAtA = randFieldSourceContext(dAtA, r, fieldNumber, wire)
|
||||
}
|
||||
return dAtA
|
||||
}
|
||||
func randFieldSourceContext(dAtA []byte, r randySourceContext, fieldNumber int, wire int) []byte {
|
||||
key := uint32(fieldNumber)<<3 | uint32(wire)
|
||||
switch wire {
|
||||
case 0:
|
||||
dAtA = encodeVarintPopulateSourceContext(dAtA, uint64(key))
|
||||
v2 := r.Int63()
|
||||
if r.Intn(2) == 0 {
|
||||
v2 *= -1
|
||||
}
|
||||
dAtA = encodeVarintPopulateSourceContext(dAtA, uint64(v2))
|
||||
case 1:
|
||||
dAtA = encodeVarintPopulateSourceContext(dAtA, uint64(key))
|
||||
dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)))
|
||||
case 2:
|
||||
dAtA = encodeVarintPopulateSourceContext(dAtA, uint64(key))
|
||||
ll := r.Intn(100)
|
||||
dAtA = encodeVarintPopulateSourceContext(dAtA, uint64(ll))
|
||||
for j := 0; j < ll; j++ {
|
||||
dAtA = append(dAtA, byte(r.Intn(256)))
|
||||
}
|
||||
default:
|
||||
dAtA = encodeVarintPopulateSourceContext(dAtA, uint64(key))
|
||||
dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)))
|
||||
}
|
||||
return dAtA
|
||||
}
|
||||
func encodeVarintPopulateSourceContext(dAtA []byte, v uint64) []byte {
|
||||
for v >= 1<<7 {
|
||||
dAtA = append(dAtA, uint8(uint64(v)&0x7f|0x80))
|
||||
v >>= 7
|
||||
}
|
||||
dAtA = append(dAtA, uint8(v))
|
||||
return dAtA
|
||||
}
|
||||
func (m *SourceContext) Size() (n int) {
|
||||
if m == nil {
|
||||
return 0
|
||||
}
|
||||
var l int
|
||||
_ = l
|
||||
l = len(m.FileName)
|
||||
if l > 0 {
|
||||
n += 1 + l + sovSourceContext(uint64(l))
|
||||
}
|
||||
if m.XXX_unrecognized != nil {
|
||||
n += len(m.XXX_unrecognized)
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func sovSourceContext(x uint64) (n int) {
|
||||
return (math_bits.Len64(x|1) + 6) / 7
|
||||
}
|
||||
func sozSourceContext(x uint64) (n int) {
|
||||
return sovSourceContext(uint64((x << 1) ^ uint64((int64(x) >> 63))))
|
||||
}
|
||||
func (this *SourceContext) String() string {
|
||||
if this == nil {
|
||||
return "nil"
|
||||
}
|
||||
s := strings.Join([]string{`&SourceContext{`,
|
||||
`FileName:` + fmt.Sprintf("%v", this.FileName) + `,`,
|
||||
`XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`,
|
||||
`}`,
|
||||
}, "")
|
||||
return s
|
||||
}
|
||||
func valueToStringSourceContext(v interface{}) string {
|
||||
rv := reflect.ValueOf(v)
|
||||
if rv.IsNil() {
|
||||
return "nil"
|
||||
}
|
||||
pv := reflect.Indirect(rv).Interface()
|
||||
return fmt.Sprintf("*%v", pv)
|
||||
}
|
||||
func (m *SourceContext) Unmarshal(dAtA []byte) error {
|
||||
l := len(dAtA)
|
||||
iNdEx := 0
|
||||
for iNdEx < l {
|
||||
preIndex := iNdEx
|
||||
var wire uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowSourceContext
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
fieldNum := int32(wire >> 3)
|
||||
wireType := int(wire & 0x7)
|
||||
if wireType == 4 {
|
||||
return fmt.Errorf("proto: SourceContext: wiretype end group for non-group")
|
||||
}
|
||||
if fieldNum <= 0 {
|
||||
return fmt.Errorf("proto: SourceContext: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||
}
|
||||
switch fieldNum {
|
||||
case 1:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field FileName", wireType)
|
||||
}
|
||||
var stringLen uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowSourceContext
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
intStringLen := int(stringLen)
|
||||
if intStringLen < 0 {
|
||||
return ErrInvalidLengthSourceContext
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthSourceContext
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.FileName = string(dAtA[iNdEx:postIndex])
|
||||
iNdEx = postIndex
|
||||
default:
|
||||
iNdEx = preIndex
|
||||
skippy, err := skipSourceContext(dAtA[iNdEx:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if skippy < 0 {
|
||||
return ErrInvalidLengthSourceContext
|
||||
}
|
||||
if (iNdEx + skippy) < 0 {
|
||||
return ErrInvalidLengthSourceContext
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
|
||||
iNdEx += skippy
|
||||
}
|
||||
}
|
||||
|
||||
if iNdEx > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func skipSourceContext(dAtA []byte) (n int, err error) {
|
||||
l := len(dAtA)
|
||||
iNdEx := 0
|
||||
depth := 0
|
||||
for iNdEx < l {
|
||||
var wire uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return 0, ErrIntOverflowSourceContext
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
wireType := int(wire & 0x7)
|
||||
switch wireType {
|
||||
case 0:
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return 0, ErrIntOverflowSourceContext
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
iNdEx++
|
||||
if dAtA[iNdEx-1] < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
case 1:
|
||||
iNdEx += 8
|
||||
case 2:
|
||||
var length int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return 0, ErrIntOverflowSourceContext
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
length |= (int(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if length < 0 {
|
||||
return 0, ErrInvalidLengthSourceContext
|
||||
}
|
||||
iNdEx += length
|
||||
case 3:
|
||||
depth++
|
||||
case 4:
|
||||
if depth == 0 {
|
||||
return 0, ErrUnexpectedEndOfGroupSourceContext
|
||||
}
|
||||
depth--
|
||||
case 5:
|
||||
iNdEx += 4
|
||||
default:
|
||||
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
|
||||
}
|
||||
if iNdEx < 0 {
|
||||
return 0, ErrInvalidLengthSourceContext
|
||||
}
|
||||
if depth == 0 {
|
||||
return iNdEx, nil
|
||||
}
|
||||
}
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
|
||||
var (
|
||||
ErrInvalidLengthSourceContext = fmt.Errorf("proto: negative length found during unmarshaling")
|
||||
ErrIntOverflowSourceContext = fmt.Errorf("proto: integer overflow")
|
||||
ErrUnexpectedEndOfGroupSourceContext = fmt.Errorf("proto: unexpected end of group")
|
||||
)
|
vendor/github.com/gogo/protobuf/types/struct.pb.go (generated, vendored): 2280 changed lines
File diff suppressed because it is too large.
vendor/github.com/gogo/protobuf/types/timestamp.go (generated, vendored): 130 deletions
@@ -1,130 +0,0 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2016 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

package types

// This file implements operations on google.protobuf.Timestamp.

import (
	"errors"
	"fmt"
	"time"
)

const (
	// Seconds field of the earliest valid Timestamp.
	// This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
	minValidSeconds = -62135596800
	// Seconds field just after the latest valid Timestamp.
	// This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
	maxValidSeconds = 253402300800
)

// validateTimestamp determines whether a Timestamp is valid.
// A valid timestamp represents a time in the range
// [0001-01-01, 10000-01-01) and has a Nanos field
// in the range [0, 1e9).
//
// If the Timestamp is valid, validateTimestamp returns nil.
// Otherwise, it returns an error that describes
// the problem.
//
// Every valid Timestamp can be represented by a time.Time, but the converse is not true.
func validateTimestamp(ts *Timestamp) error {
	if ts == nil {
		return errors.New("timestamp: nil Timestamp")
	}
	if ts.Seconds < minValidSeconds {
		return fmt.Errorf("timestamp: %#v before 0001-01-01", ts)
	}
	if ts.Seconds >= maxValidSeconds {
		return fmt.Errorf("timestamp: %#v after 10000-01-01", ts)
	}
	if ts.Nanos < 0 || ts.Nanos >= 1e9 {
		return fmt.Errorf("timestamp: %#v: nanos not in range [0, 1e9)", ts)
	}
	return nil
}

// TimestampFromProto converts a google.protobuf.Timestamp proto to a time.Time.
// It returns an error if the argument is invalid.
//
// Unlike most Go functions, if Timestamp returns an error, the first return value
// is not the zero time.Time. Instead, it is the value obtained from the
// time.Unix function when passed the contents of the Timestamp, in the UTC
// locale. This may or may not be a meaningful time; many invalid Timestamps
// do map to valid time.Times.
//
// A nil Timestamp returns an error. The first return value in that case is
// undefined.
func TimestampFromProto(ts *Timestamp) (time.Time, error) {
	// Don't return the zero value on error, because corresponds to a valid
	// timestamp. Instead return whatever time.Unix gives us.
	var t time.Time
	if ts == nil {
		t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp
	} else {
		t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC()
	}
	return t, validateTimestamp(ts)
}

// TimestampNow returns a google.protobuf.Timestamp for the current time.
func TimestampNow() *Timestamp {
	ts, err := TimestampProto(time.Now())
	if err != nil {
		panic("ptypes: time.Now() out of Timestamp range")
	}
	return ts
}

// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto.
// It returns an error if the resulting Timestamp is invalid.
func TimestampProto(t time.Time) (*Timestamp, error) {
	ts := &Timestamp{
		Seconds: t.Unix(),
		Nanos:   int32(t.Nanosecond()),
	}
	if err := validateTimestamp(ts); err != nil {
		return nil, err
	}
	return ts, nil
}

// TimestampString returns the RFC 3339 string for valid Timestamps. For invalid
// Timestamps, it returns an error message in parentheses.
func TimestampString(ts *Timestamp) string {
	t, err := TimestampFromProto(ts)
	if err != nil {
		return fmt.Sprintf("(%v)", err)
	}
	return t.Format(time.RFC3339Nano)
}
vendor/github.com/gogo/protobuf/types/timestamp.pb.go (generated, vendored): 542 deletions
@@ -1,542 +0,0 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
|
||||
// source: google/protobuf/timestamp.proto
|
||||
|
||||
package types
|
||||
|
||||
import (
|
||||
bytes "bytes"
|
||||
fmt "fmt"
|
||||
proto "github.com/gogo/protobuf/proto"
|
||||
io "io"
|
||||
math "math"
|
||||
math_bits "math/bits"
|
||||
reflect "reflect"
|
||||
strings "strings"
|
||||
)
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
|
||||
|
||||
// A Timestamp represents a point in time independent of any time zone or local
|
||||
// calendar, encoded as a count of seconds and fractions of seconds at
|
||||
// nanosecond resolution. The count is relative to an epoch at UTC midnight on
|
||||
// January 1, 1970, in the proleptic Gregorian calendar which extends the
|
||||
// Gregorian calendar backwards to year one.
|
||||
//
|
||||
// All minutes are 60 seconds long. Leap seconds are "smeared" so that no leap
|
||||
// second table is needed for interpretation, using a [24-hour linear
|
||||
// smear](https://developers.google.com/time/smear).
|
||||
//
|
||||
// The range is from 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. By
|
||||
// restricting to that range, we ensure that we can convert to and from [RFC
|
||||
// 3339](https://www.ietf.org/rfc/rfc3339.txt) date strings.
|
||||
//
|
||||
// # Examples
|
||||
//
|
||||
// Example 1: Compute Timestamp from POSIX `time()`.
|
||||
//
|
||||
// Timestamp timestamp;
|
||||
// timestamp.set_seconds(time(NULL));
|
||||
// timestamp.set_nanos(0);
|
||||
//
|
||||
// Example 2: Compute Timestamp from POSIX `gettimeofday()`.
|
||||
//
|
||||
// struct timeval tv;
|
||||
// gettimeofday(&tv, NULL);
|
||||
//
|
||||
// Timestamp timestamp;
|
||||
// timestamp.set_seconds(tv.tv_sec);
|
||||
// timestamp.set_nanos(tv.tv_usec * 1000);
|
||||
//
|
||||
// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`.
|
||||
//
|
||||
// FILETIME ft;
|
||||
// GetSystemTimeAsFileTime(&ft);
|
||||
// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime;
|
||||
//
|
||||
// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z
|
||||
// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z.
|
||||
// Timestamp timestamp;
|
||||
// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL));
|
||||
// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100));
|
||||
//
|
||||
// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`.
|
||||
//
|
||||
// long millis = System.currentTimeMillis();
|
||||
//
|
||||
// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000)
|
||||
// .setNanos((int) ((millis % 1000) * 1000000)).build();
|
||||
//
|
||||
//
|
||||
// Example 5: Compute Timestamp from current time in Python.
|
||||
//
|
||||
// timestamp = Timestamp()
|
||||
// timestamp.GetCurrentTime()
|
||||
//
|
||||
// # JSON Mapping
|
||||
//
|
||||
// In JSON format, the Timestamp type is encoded as a string in the
|
||||
// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the
|
||||
// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z"
|
||||
// where {year} is always expressed using four digits while {month}, {day},
|
||||
// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional
|
||||
// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution),
|
||||
// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone
|
||||
// is required. A proto3 JSON serializer should always use UTC (as indicated by
|
||||
// "Z") when printing the Timestamp type and a proto3 JSON parser should be
|
||||
// able to accept both UTC and other timezones (as indicated by an offset).
|
||||
//
|
||||
// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past
|
||||
// 01:30 UTC on January 15, 2017.
|
||||
//
|
||||
// In JavaScript, one can convert a Date object to this format using the
|
||||
// standard
|
||||
// [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString)
|
||||
// method. In Python, a standard `datetime.datetime` object can be converted
|
||||
// to this format using
|
||||
// [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) with
|
||||
// the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one can use
|
||||
// the Joda Time's [`ISODateTimeFormat.dateTime()`](
|
||||
// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime%2D%2D
|
||||
// ) to obtain a formatter capable of generating timestamps in this format.
|
||||
//
|
||||
//
|
||||
type Timestamp struct {
|
||||
// Represents seconds of UTC time since Unix epoch
|
||||
// 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
|
||||
// 9999-12-31T23:59:59Z inclusive.
|
||||
Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"`
|
||||
// Non-negative fractions of a second at nanosecond resolution. Negative
|
||||
// second values with fractions must still have non-negative nanos values
|
||||
// that count forward in time. Must be from 0 to 999,999,999
|
||||
// inclusive.
|
||||
Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *Timestamp) Reset() { *m = Timestamp{} }
|
||||
func (*Timestamp) ProtoMessage() {}
|
||||
func (*Timestamp) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_292007bbfe81227e, []int{0}
|
||||
}
|
||||
func (*Timestamp) XXX_WellKnownType() string { return "Timestamp" }
|
||||
func (m *Timestamp) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
}
|
||||
func (m *Timestamp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
if deterministic {
|
||||
return xxx_messageInfo_Timestamp.Marshal(b, m, deterministic)
|
||||
} else {
|
||||
b = b[:cap(b)]
|
||||
n, err := m.MarshalToSizedBuffer(b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return b[:n], nil
|
||||
}
|
||||
}
|
||||
func (m *Timestamp) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_Timestamp.Merge(m, src)
|
||||
}
|
||||
func (m *Timestamp) XXX_Size() int {
|
||||
return m.Size()
|
||||
}
|
||||
func (m *Timestamp) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_Timestamp.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_Timestamp proto.InternalMessageInfo
|
||||
|
||||
func (m *Timestamp) GetSeconds() int64 {
|
||||
if m != nil {
|
||||
return m.Seconds
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *Timestamp) GetNanos() int32 {
|
||||
if m != nil {
|
||||
return m.Nanos
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (*Timestamp) XXX_MessageName() string {
|
||||
return "google.protobuf.Timestamp"
|
||||
}
|
||||
func init() {
|
||||
proto.RegisterType((*Timestamp)(nil), "google.protobuf.Timestamp")
|
||||
}
|
||||
|
||||
func init() { proto.RegisterFile("google/protobuf/timestamp.proto", fileDescriptor_292007bbfe81227e) }
|
||||
|
||||
var fileDescriptor_292007bbfe81227e = []byte{
|
||||
// 212 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4f, 0xcf, 0xcf, 0x4f,
|
||||
0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0xc9, 0xcc, 0x4d,
|
||||
0x2d, 0x2e, 0x49, 0xcc, 0x2d, 0xd0, 0x03, 0x0b, 0x09, 0xf1, 0x43, 0x14, 0xe8, 0xc1, 0x14, 0x28,
|
||||
0x59, 0x73, 0x71, 0x86, 0xc0, 0xd4, 0x08, 0x49, 0x70, 0xb1, 0x17, 0xa7, 0x26, 0xe7, 0xe7, 0xa5,
|
||||
0x14, 0x4b, 0x30, 0x2a, 0x30, 0x6a, 0x30, 0x07, 0xc1, 0xb8, 0x42, 0x22, 0x5c, 0xac, 0x79, 0x89,
|
||||
0x79, 0xf9, 0xc5, 0x12, 0x4c, 0x0a, 0x8c, 0x1a, 0xac, 0x41, 0x10, 0x8e, 0x53, 0x03, 0xe3, 0x8d,
|
||||
0x87, 0x72, 0x0c, 0x1f, 0x1e, 0xca, 0x31, 0xae, 0x78, 0x24, 0xc7, 0x78, 0xe2, 0x91, 0x1c, 0xe3,
|
||||
0x85, 0x47, 0x72, 0x8c, 0x0f, 0x1e, 0xc9, 0x31, 0xbe, 0x78, 0x24, 0xc7, 0xf0, 0xe1, 0x91, 0x1c,
|
||||
0xe3, 0x8a, 0xc7, 0x72, 0x8c, 0x27, 0x1e, 0xcb, 0x31, 0x72, 0x09, 0x27, 0xe7, 0xe7, 0xea, 0xa1,
|
||||
0x59, 0xee, 0xc4, 0x07, 0xb7, 0x3a, 0x00, 0x24, 0x14, 0xc0, 0x18, 0xc5, 0x5a, 0x52, 0x59, 0x90,
|
||||
0x5a, 0xfc, 0x83, 0x91, 0x71, 0x11, 0x13, 0xb3, 0x7b, 0x80, 0xd3, 0x2a, 0x26, 0x39, 0x77, 0x88,
|
||||
0x9e, 0x00, 0xa8, 0x1e, 0xbd, 0xf0, 0xd4, 0x9c, 0x1c, 0xef, 0xbc, 0xfc, 0xf2, 0xbc, 0x10, 0x90,
|
||||
0xca, 0x24, 0x36, 0xb0, 0x61, 0xc6, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0x0b, 0x23, 0x83, 0xdd,
|
||||
0xfa, 0x00, 0x00, 0x00,
|
||||
}
|
||||
|
||||
func (this *Timestamp) Compare(that interface{}) int {
|
||||
if that == nil {
|
||||
if this == nil {
|
||||
return 0
|
||||
}
|
||||
return 1
|
||||
}
|
||||
|
||||
that1, ok := that.(*Timestamp)
|
||||
if !ok {
|
||||
that2, ok := that.(Timestamp)
|
||||
if ok {
|
||||
that1 = &that2
|
||||
} else {
|
||||
return 1
|
||||
}
|
||||
}
|
||||
if that1 == nil {
|
||||
if this == nil {
|
||||
return 0
|
||||
}
|
||||
return 1
|
||||
} else if this == nil {
|
||||
return -1
|
||||
}
|
||||
if this.Seconds != that1.Seconds {
|
||||
if this.Seconds < that1.Seconds {
|
||||
return -1
|
||||
}
|
||||
return 1
|
||||
}
|
||||
if this.Nanos != that1.Nanos {
|
||||
if this.Nanos < that1.Nanos {
|
||||
return -1
|
||||
}
|
||||
return 1
|
||||
}
|
||||
if c := bytes.Compare(this.XXX_unrecognized, that1.XXX_unrecognized); c != 0 {
|
||||
return c
|
||||
}
|
||||
return 0
|
||||
}
|
||||
func (this *Timestamp) Equal(that interface{}) bool {
|
||||
if that == nil {
|
||||
return this == nil
|
||||
}
|
||||
|
||||
that1, ok := that.(*Timestamp)
|
||||
if !ok {
|
||||
that2, ok := that.(Timestamp)
|
||||
if ok {
|
||||
that1 = &that2
|
||||
} else {
|
||||
return false
|
||||
}
|
||||
}
|
||||
if that1 == nil {
|
||||
return this == nil
|
||||
} else if this == nil {
|
||||
return false
|
||||
}
|
||||
if this.Seconds != that1.Seconds {
|
||||
return false
|
||||
}
|
||||
if this.Nanos != that1.Nanos {
|
||||
return false
|
||||
}
|
||||
if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
func (this *Timestamp) GoString() string {
|
||||
if this == nil {
|
||||
return "nil"
|
||||
}
|
||||
s := make([]string, 0, 6)
|
||||
s = append(s, "&types.Timestamp{")
|
||||
s = append(s, "Seconds: "+fmt.Sprintf("%#v", this.Seconds)+",\n")
|
||||
s = append(s, "Nanos: "+fmt.Sprintf("%#v", this.Nanos)+",\n")
|
||||
if this.XXX_unrecognized != nil {
|
||||
s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
|
||||
}
|
||||
s = append(s, "}")
|
||||
return strings.Join(s, "")
|
||||
}
|
||||
func valueToGoStringTimestamp(v interface{}, typ string) string {
|
||||
rv := reflect.ValueOf(v)
|
||||
if rv.IsNil() {
|
||||
return "nil"
|
||||
}
|
||||
pv := reflect.Indirect(rv).Interface()
|
||||
return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
|
||||
}
|
||||
func (m *Timestamp) Marshal() (dAtA []byte, err error) {
|
||||
size := m.Size()
|
||||
dAtA = make([]byte, size)
|
||||
n, err := m.MarshalToSizedBuffer(dAtA[:size])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return dAtA[:n], nil
|
||||
}
|
||||
|
||||
func (m *Timestamp) MarshalTo(dAtA []byte) (int, error) {
|
||||
size := m.Size()
|
||||
return m.MarshalToSizedBuffer(dAtA[:size])
|
||||
}
|
||||
|
||||
func (m *Timestamp) MarshalToSizedBuffer(dAtA []byte) (int, error) {
|
||||
i := len(dAtA)
|
||||
_ = i
|
||||
var l int
|
||||
_ = l
|
||||
if m.XXX_unrecognized != nil {
|
||||
i -= len(m.XXX_unrecognized)
|
||||
copy(dAtA[i:], m.XXX_unrecognized)
|
||||
}
|
||||
if m.Nanos != 0 {
|
||||
i = encodeVarintTimestamp(dAtA, i, uint64(m.Nanos))
|
||||
i--
|
||||
dAtA[i] = 0x10
|
||||
}
|
||||
if m.Seconds != 0 {
|
||||
i = encodeVarintTimestamp(dAtA, i, uint64(m.Seconds))
|
||||
i--
|
||||
dAtA[i] = 0x8
|
||||
}
|
||||
return len(dAtA) - i, nil
|
||||
}
|
||||
|
||||
func encodeVarintTimestamp(dAtA []byte, offset int, v uint64) int {
|
||||
offset -= sovTimestamp(v)
|
||||
base := offset
|
||||
for v >= 1<<7 {
|
||||
dAtA[offset] = uint8(v&0x7f | 0x80)
|
||||
v >>= 7
|
||||
offset++
|
||||
}
|
||||
dAtA[offset] = uint8(v)
|
||||
return base
|
||||
}
|
||||
func (m *Timestamp) Size() (n int) {
|
||||
if m == nil {
|
||||
return 0
|
||||
}
|
||||
var l int
|
||||
_ = l
|
||||
if m.Seconds != 0 {
|
||||
n += 1 + sovTimestamp(uint64(m.Seconds))
|
||||
}
|
||||
if m.Nanos != 0 {
|
||||
n += 1 + sovTimestamp(uint64(m.Nanos))
|
||||
}
|
||||
if m.XXX_unrecognized != nil {
|
||||
n += len(m.XXX_unrecognized)
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func sovTimestamp(x uint64) (n int) {
|
||||
return (math_bits.Len64(x|1) + 6) / 7
|
||||
}
|
||||
func sozTimestamp(x uint64) (n int) {
|
||||
return sovTimestamp(uint64((x << 1) ^ uint64((int64(x) >> 63))))
|
||||
}
|
||||
func (m *Timestamp) Unmarshal(dAtA []byte) error {
|
||||
l := len(dAtA)
|
||||
iNdEx := 0
|
||||
for iNdEx < l {
|
||||
preIndex := iNdEx
|
||||
var wire uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowTimestamp
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
fieldNum := int32(wire >> 3)
|
||||
wireType := int(wire & 0x7)
|
||||
if wireType == 4 {
|
||||
return fmt.Errorf("proto: Timestamp: wiretype end group for non-group")
|
||||
}
|
||||
if fieldNum <= 0 {
|
||||
return fmt.Errorf("proto: Timestamp: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||
}
|
||||
switch fieldNum {
|
||||
case 1:
|
||||
if wireType != 0 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Seconds", wireType)
|
||||
}
|
||||
m.Seconds = 0
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowTimestamp
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
m.Seconds |= int64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
case 2:
|
||||
if wireType != 0 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Nanos", wireType)
|
||||
}
|
||||
m.Nanos = 0
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowTimestamp
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
m.Nanos |= int32(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
default:
|
||||
iNdEx = preIndex
|
||||
skippy, err := skipTimestamp(dAtA[iNdEx:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if skippy < 0 {
|
||||
return ErrInvalidLengthTimestamp
|
||||
}
|
||||
if (iNdEx + skippy) < 0 {
|
||||
return ErrInvalidLengthTimestamp
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
|
||||
iNdEx += skippy
|
||||
}
|
||||
}
|
||||
|
||||
if iNdEx > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func skipTimestamp(dAtA []byte) (n int, err error) {
|
||||
l := len(dAtA)
|
||||
iNdEx := 0
|
||||
depth := 0
|
||||
for iNdEx < l {
|
||||
var wire uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return 0, ErrIntOverflowTimestamp
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
wireType := int(wire & 0x7)
|
||||
switch wireType {
|
||||
case 0:
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return 0, ErrIntOverflowTimestamp
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
iNdEx++
|
||||
if dAtA[iNdEx-1] < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
case 1:
|
||||
iNdEx += 8
|
||||
case 2:
|
||||
var length int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return 0, ErrIntOverflowTimestamp
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
length |= (int(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if length < 0 {
|
||||
return 0, ErrInvalidLengthTimestamp
|
||||
}
|
||||
iNdEx += length
|
||||
case 3:
|
||||
depth++
|
||||
case 4:
|
||||
if depth == 0 {
|
||||
return 0, ErrUnexpectedEndOfGroupTimestamp
|
||||
}
|
||||
depth--
|
||||
case 5:
|
||||
iNdEx += 4
|
||||
default:
|
||||
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
|
||||
}
|
||||
if iNdEx < 0 {
|
||||
return 0, ErrInvalidLengthTimestamp
|
||||
}
|
||||
if depth == 0 {
|
||||
return iNdEx, nil
|
||||
}
|
||||
}
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
|
||||
var (
|
||||
ErrInvalidLengthTimestamp = fmt.Errorf("proto: negative length found during unmarshaling")
|
||||
ErrIntOverflowTimestamp = fmt.Errorf("proto: integer overflow")
|
||||
ErrUnexpectedEndOfGroupTimestamp = fmt.Errorf("proto: unexpected end of group")
|
||||
)
|
vendor/github.com/gogo/protobuf/types/timestamp_gogo.go (generated, vendored): 94 deletions
@@ -1,94 +0,0 @@
// Protocol Buffers for Go with Gadgets
//
// Copyright (c) 2016, The GoGo Authors. All rights reserved.
// http://github.com/gogo/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

package types

import (
	"time"
)

func NewPopulatedTimestamp(r interface {
	Int63() int64
}, easy bool) *Timestamp {
	this := &Timestamp{}
	ns := int64(r.Int63())
	this.Seconds = ns / 1e9
	this.Nanos = int32(ns % 1e9)
	return this
}

func (ts *Timestamp) String() string {
	return TimestampString(ts)
}

func NewPopulatedStdTime(r interface {
	Int63() int64
}, easy bool) *time.Time {
	timestamp := NewPopulatedTimestamp(r, easy)
	t, err := TimestampFromProto(timestamp)
	if err != nil {
		return nil
	}
	return &t
}

func SizeOfStdTime(t time.Time) int {
	ts, err := TimestampProto(t)
	if err != nil {
		return 0
	}
	return ts.Size()
}

func StdTimeMarshal(t time.Time) ([]byte, error) {
	size := SizeOfStdTime(t)
	buf := make([]byte, size)
	_, err := StdTimeMarshalTo(t, buf)
	return buf, err
}

func StdTimeMarshalTo(t time.Time, data []byte) (int, error) {
	ts, err := TimestampProto(t)
	if err != nil {
		return 0, err
	}
	return ts.MarshalTo(data)
}

func StdTimeUnmarshal(t *time.Time, data []byte) error {
	ts := &Timestamp{}
	if err := ts.Unmarshal(data); err != nil {
		return err
	}
	tt, err := TimestampFromProto(ts)
	if err != nil {
		return err
	}
	*t = tt
	return nil
}
vendor/github.com/gogo/protobuf/types/type.pb.go (generated, vendored): 3370 changed lines
File diff suppressed because it is too large.
vendor/github.com/gogo/protobuf/types/wrappers.pb.go (generated, vendored): 2730 changed lines
File diff suppressed because it is too large.
vendor/github.com/gogo/protobuf/types/wrappers_gogo.go (generated, vendored): 300 deletions
@@ -1,300 +0,0 @@
// Protocol Buffers for Go with Gadgets
|
||||
//
|
||||
// Copyright (c) 2018, The GoGo Authors. All rights reserved.
|
||||
// http://github.com/gogo/protobuf
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
package types
|
||||
|
||||
func NewPopulatedStdDouble(r randyWrappers, easy bool) *float64 {
|
||||
v := NewPopulatedDoubleValue(r, easy)
|
||||
return &v.Value
|
||||
}
|
||||
|
||||
func SizeOfStdDouble(v float64) int {
|
||||
pv := &DoubleValue{Value: v}
|
||||
return pv.Size()
|
||||
}
|
||||
|
||||
func StdDoubleMarshal(v float64) ([]byte, error) {
|
||||
size := SizeOfStdDouble(v)
|
||||
buf := make([]byte, size)
|
||||
_, err := StdDoubleMarshalTo(v, buf)
|
||||
return buf, err
|
||||
}
|
||||
|
||||
func StdDoubleMarshalTo(v float64, data []byte) (int, error) {
|
||||
pv := &DoubleValue{Value: v}
|
||||
return pv.MarshalTo(data)
|
||||
}
|
||||
|
||||
func StdDoubleUnmarshal(v *float64, data []byte) error {
|
||||
pv := &DoubleValue{}
|
||||
if err := pv.Unmarshal(data); err != nil {
|
||||
return err
|
||||
}
|
||||
*v = pv.Value
|
||||
return nil
|
||||
}
|
||||
func NewPopulatedStdFloat(r randyWrappers, easy bool) *float32 {
|
||||
v := NewPopulatedFloatValue(r, easy)
|
||||
return &v.Value
|
||||
}
|
||||
|
||||
func SizeOfStdFloat(v float32) int {
|
||||
pv := &FloatValue{Value: v}
|
||||
return pv.Size()
|
||||
}
|
||||
|
||||
func StdFloatMarshal(v float32) ([]byte, error) {
|
||||
size := SizeOfStdFloat(v)
|
||||
buf := make([]byte, size)
|
||||
_, err := StdFloatMarshalTo(v, buf)
|
||||
return buf, err
|
||||
}
|
||||
|
||||
func StdFloatMarshalTo(v float32, data []byte) (int, error) {
|
||||
pv := &FloatValue{Value: v}
|
||||
return pv.MarshalTo(data)
|
||||
}
|
||||
|
||||
func StdFloatUnmarshal(v *float32, data []byte) error {
|
||||
pv := &FloatValue{}
|
||||
if err := pv.Unmarshal(data); err != nil {
|
||||
return err
|
||||
}
|
||||
*v = pv.Value
|
||||
return nil
|
||||
}
|
||||
func NewPopulatedStdInt64(r randyWrappers, easy bool) *int64 {
|
||||
v := NewPopulatedInt64Value(r, easy)
|
||||
return &v.Value
|
||||
}
|
||||
|
||||
func SizeOfStdInt64(v int64) int {
|
||||
pv := &Int64Value{Value: v}
|
||||
return pv.Size()
|
||||
}
|
||||
|
||||
func StdInt64Marshal(v int64) ([]byte, error) {
|
||||
size := SizeOfStdInt64(v)
|
||||
buf := make([]byte, size)
|
||||
_, err := StdInt64MarshalTo(v, buf)
|
||||
return buf, err
|
||||
}
|
||||
|
||||
func StdInt64MarshalTo(v int64, data []byte) (int, error) {
|
||||
pv := &Int64Value{Value: v}
|
||||
return pv.MarshalTo(data)
|
||||
}
|
||||
|
||||
func StdInt64Unmarshal(v *int64, data []byte) error {
|
||||
pv := &Int64Value{}
|
||||
if err := pv.Unmarshal(data); err != nil {
|
||||
return err
|
||||
}
|
||||
*v = pv.Value
|
||||
return nil
|
||||
}
|
||||
func NewPopulatedStdUInt64(r randyWrappers, easy bool) *uint64 {
|
||||
v := NewPopulatedUInt64Value(r, easy)
|
||||
return &v.Value
|
||||
}
|
||||
|
||||
func SizeOfStdUInt64(v uint64) int {
|
||||
pv := &UInt64Value{Value: v}
|
||||
return pv.Size()
|
||||
}
|
||||
|
||||
func StdUInt64Marshal(v uint64) ([]byte, error) {
|
||||
size := SizeOfStdUInt64(v)
|
||||
buf := make([]byte, size)
|
||||
_, err := StdUInt64MarshalTo(v, buf)
|
||||
return buf, err
|
||||
}
|
||||
|
||||
func StdUInt64MarshalTo(v uint64, data []byte) (int, error) {
|
||||
pv := &UInt64Value{Value: v}
|
||||
return pv.MarshalTo(data)
|
||||
}
|
||||
|
||||
func StdUInt64Unmarshal(v *uint64, data []byte) error {
|
||||
pv := &UInt64Value{}
|
||||
if err := pv.Unmarshal(data); err != nil {
|
||||
return err
|
||||
}
|
||||
*v = pv.Value
|
||||
return nil
|
||||
}
|
||||
func NewPopulatedStdInt32(r randyWrappers, easy bool) *int32 {
|
||||
v := NewPopulatedInt32Value(r, easy)
|
||||
return &v.Value
|
||||
}
|
||||
|
||||
func SizeOfStdInt32(v int32) int {
|
||||
pv := &Int32Value{Value: v}
|
||||
return pv.Size()
|
||||
}
|
||||
|
||||
func StdInt32Marshal(v int32) ([]byte, error) {
|
||||
size := SizeOfStdInt32(v)
|
||||
buf := make([]byte, size)
|
||||
_, err := StdInt32MarshalTo(v, buf)
|
||||
return buf, err
|
||||
}
|
||||
|
||||
func StdInt32MarshalTo(v int32, data []byte) (int, error) {
|
||||
pv := &Int32Value{Value: v}
|
||||
return pv.MarshalTo(data)
|
||||
}
|
||||
|
||||
func StdInt32Unmarshal(v *int32, data []byte) error {
|
||||
pv := &Int32Value{}
|
||||
if err := pv.Unmarshal(data); err != nil {
|
||||
return err
|
||||
}
|
||||
*v = pv.Value
|
||||
return nil
|
||||
}
|
||||
func NewPopulatedStdUInt32(r randyWrappers, easy bool) *uint32 {
|
||||
v := NewPopulatedUInt32Value(r, easy)
|
||||
return &v.Value
|
||||
}
|
||||
|
||||
func SizeOfStdUInt32(v uint32) int {
|
||||
pv := &UInt32Value{Value: v}
|
||||
return pv.Size()
|
||||
}
|
||||
|
||||
func StdUInt32Marshal(v uint32) ([]byte, error) {
|
||||
size := SizeOfStdUInt32(v)
|
||||
buf := make([]byte, size)
|
||||
_, err := StdUInt32MarshalTo(v, buf)
|
||||
return buf, err
|
||||
}
|
||||
|
||||
func StdUInt32MarshalTo(v uint32, data []byte) (int, error) {
|
||||
pv := &UInt32Value{Value: v}
|
||||
return pv.MarshalTo(data)
|
||||
}
|
||||
|
||||
func StdUInt32Unmarshal(v *uint32, data []byte) error {
|
||||
pv := &UInt32Value{}
|
||||
if err := pv.Unmarshal(data); err != nil {
|
||||
return err
|
||||
}
|
||||
*v = pv.Value
|
||||
return nil
|
||||
}
|
||||
func NewPopulatedStdBool(r randyWrappers, easy bool) *bool {
|
||||
v := NewPopulatedBoolValue(r, easy)
|
||||
return &v.Value
|
||||
}
|
||||
|
||||
func SizeOfStdBool(v bool) int {
|
||||
pv := &BoolValue{Value: v}
|
||||
return pv.Size()
|
||||
}
|
||||
|
||||
func StdBoolMarshal(v bool) ([]byte, error) {
|
||||
size := SizeOfStdBool(v)
|
||||
buf := make([]byte, size)
|
||||
_, err := StdBoolMarshalTo(v, buf)
|
||||
return buf, err
|
||||
}
|
||||
|
||||
func StdBoolMarshalTo(v bool, data []byte) (int, error) {
|
||||
pv := &BoolValue{Value: v}
|
||||
return pv.MarshalTo(data)
|
||||
}
|
||||
|
||||
func StdBoolUnmarshal(v *bool, data []byte) error {
|
||||
pv := &BoolValue{}
|
||||
if err := pv.Unmarshal(data); err != nil {
|
||||
return err
|
||||
}
|
||||
*v = pv.Value
|
||||
return nil
|
||||
}
|
||||
func NewPopulatedStdString(r randyWrappers, easy bool) *string {
|
||||
v := NewPopulatedStringValue(r, easy)
|
||||
return &v.Value
|
||||
}
|
||||
|
||||
func SizeOfStdString(v string) int {
|
||||
pv := &StringValue{Value: v}
|
||||
return pv.Size()
|
||||
}
|
||||
|
||||
func StdStringMarshal(v string) ([]byte, error) {
|
||||
size := SizeOfStdString(v)
|
||||
buf := make([]byte, size)
|
||||
_, err := StdStringMarshalTo(v, buf)
|
||||
return buf, err
|
||||
}
|
||||
|
||||
func StdStringMarshalTo(v string, data []byte) (int, error) {
|
||||
pv := &StringValue{Value: v}
|
||||
return pv.MarshalTo(data)
|
||||
}
|
||||
|
||||
func StdStringUnmarshal(v *string, data []byte) error {
|
||||
pv := &StringValue{}
|
||||
if err := pv.Unmarshal(data); err != nil {
|
||||
return err
|
||||
}
|
||||
*v = pv.Value
|
||||
return nil
|
||||
}
|
||||
func NewPopulatedStdBytes(r randyWrappers, easy bool) *[]byte {
|
||||
v := NewPopulatedBytesValue(r, easy)
|
||||
return &v.Value
|
||||
}
|
||||
|
||||
func SizeOfStdBytes(v []byte) int {
|
||||
pv := &BytesValue{Value: v}
|
||||
return pv.Size()
|
||||
}
|
||||
|
||||
func StdBytesMarshal(v []byte) ([]byte, error) {
|
||||
size := SizeOfStdBytes(v)
|
||||
buf := make([]byte, size)
|
||||
_, err := StdBytesMarshalTo(v, buf)
|
||||
return buf, err
|
||||
}
|
||||
|
||||
func StdBytesMarshalTo(v []byte, data []byte) (int, error) {
|
||||
pv := &BytesValue{Value: v}
|
||||
return pv.MarshalTo(data)
|
||||
}
|
||||
|
||||
func StdBytesUnmarshal(v *[]byte, data []byte) error {
|
||||
pv := &BytesValue{}
|
||||
if err := pv.Unmarshal(data); err != nil {
|
||||
return err
|
||||
}
|
||||
*v = pv.Value
|
||||
return nil
|
||||
}
|
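The Std* helpers above give each wrapper type a symmetric set of size/marshal/unmarshal functions over plain Go primitives. A minimal round trip through the int64 variant might look like the sketch below; the `github.com/gogo/protobuf/types` import path is an assumption about where this vendored file lives (its header is not visible in this hunk).

package main

import (
	"fmt"

	// Assumed import path for the generated helpers above; adjust if the
	// vendored package differs.
	"github.com/gogo/protobuf/types"
)

func main() {
	buf, err := types.StdInt64Marshal(42) // wraps 42 in an Int64Value and marshals it
	if err != nil {
		panic(err)
	}
	var out int64
	if err := types.StdInt64Unmarshal(&out, buf); err != nil {
		panic(err)
	}
	fmt.Println(out) // 42
}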
71 vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go generated vendored
@@ -1,71 +0,0 @@
|
|||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// source: github.com/golang/protobuf/ptypes/wrappers/wrappers.proto
|
||||
|
||||
package wrappers
|
||||
|
||||
import (
|
||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||
wrapperspb "google.golang.org/protobuf/types/known/wrapperspb"
|
||||
reflect "reflect"
|
||||
)
|
||||
|
||||
// Symbols defined in public import of google/protobuf/wrappers.proto.
|
||||
|
||||
type DoubleValue = wrapperspb.DoubleValue
|
||||
type FloatValue = wrapperspb.FloatValue
|
||||
type Int64Value = wrapperspb.Int64Value
|
||||
type UInt64Value = wrapperspb.UInt64Value
|
||||
type Int32Value = wrapperspb.Int32Value
|
||||
type UInt32Value = wrapperspb.UInt32Value
|
||||
type BoolValue = wrapperspb.BoolValue
|
||||
type StringValue = wrapperspb.StringValue
|
||||
type BytesValue = wrapperspb.BytesValue
|
||||
|
||||
var File_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_rawDesc = []byte{
|
||||
0x0a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
|
||||
0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
|
||||
0x70, 0x65, 0x73, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2f, 0x77, 0x72, 0x61,
|
||||
0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f,
|
||||
0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61,
|
||||
0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x35, 0x5a, 0x33, 0x67,
|
||||
0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67,
|
||||
0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73,
|
||||
0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x3b, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65,
|
||||
0x72, 0x73, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
}
|
||||
|
||||
var file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_goTypes = []interface{}{}
|
||||
var file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_depIdxs = []int32{
|
||||
0, // [0:0] is the sub-list for method output_type
|
||||
0, // [0:0] is the sub-list for method input_type
|
||||
0, // [0:0] is the sub-list for extension type_name
|
||||
0, // [0:0] is the sub-list for extension extendee
|
||||
0, // [0:0] is the sub-list for field type_name
|
||||
}
|
||||
|
||||
func init() { file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_init() }
|
||||
func file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_init() {
|
||||
if File_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto != nil {
|
||||
return
|
||||
}
|
||||
type x struct{}
|
||||
out := protoimpl.TypeBuilder{
|
||||
File: protoimpl.DescBuilder{
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_rawDesc,
|
||||
NumEnums: 0,
|
||||
NumMessages: 0,
|
||||
NumExtensions: 0,
|
||||
NumServices: 0,
|
||||
},
|
||||
GoTypes: file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_goTypes,
|
||||
DependencyIndexes: file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_depIdxs,
|
||||
}.Build()
|
||||
File_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto = out.File
|
||||
file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_rawDesc = nil
|
||||
file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_goTypes = nil
|
||||
file_github_com_golang_protobuf_ptypes_wrappers_wrappers_proto_depIdxs = nil
|
||||
}
|
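The removed wrappers.pb.go only re-exports the canonical wrapper types as aliases, so callers of either import path see the same Go types. A hedged sketch of that equivalence, assuming both modules are available:

package main

import (
	"fmt"

	"github.com/golang/protobuf/ptypes/wrappers"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	// wrappers.StringValue is a type alias for wrapperspb.StringValue,
	// so values built through either path are interchangeable.
	var v *wrappers.StringValue = wrapperspb.String("hello")
	fmt.Println(v.GetValue()) // hello
}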
85 vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/BUILD.bazel generated vendored
@@ -1,85 +0,0 @@
|
|||
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
|
||||
|
||||
package(default_visibility = ["//visibility:public"])
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"context.go",
|
||||
"convert.go",
|
||||
"doc.go",
|
||||
"errors.go",
|
||||
"fieldmask.go",
|
||||
"handler.go",
|
||||
"marshal_httpbodyproto.go",
|
||||
"marshal_json.go",
|
||||
"marshal_jsonpb.go",
|
||||
"marshal_proto.go",
|
||||
"marshaler.go",
|
||||
"marshaler_registry.go",
|
||||
"mux.go",
|
||||
"pattern.go",
|
||||
"proto2_convert.go",
|
||||
"proto_errors.go",
|
||||
"query.go",
|
||||
],
|
||||
importpath = "github.com/grpc-ecosystem/grpc-gateway/runtime",
|
||||
deps = [
|
||||
"//internal:go_default_library",
|
||||
"//utilities:go_default_library",
|
||||
"@com_github_golang_protobuf//descriptor:go_default_library_gen",
|
||||
"@com_github_golang_protobuf//jsonpb:go_default_library_gen",
|
||||
"@com_github_golang_protobuf//proto:go_default_library",
|
||||
"@go_googleapis//google/api:httpbody_go_proto",
|
||||
"@io_bazel_rules_go//proto/wkt:any_go_proto",
|
||||
"@io_bazel_rules_go//proto/wkt:descriptor_go_proto",
|
||||
"@io_bazel_rules_go//proto/wkt:duration_go_proto",
|
||||
"@io_bazel_rules_go//proto/wkt:field_mask_go_proto",
|
||||
"@io_bazel_rules_go//proto/wkt:timestamp_go_proto",
|
||||
"@io_bazel_rules_go//proto/wkt:wrappers_go_proto",
|
||||
"@org_golang_google_grpc//codes:go_default_library",
|
||||
"@org_golang_google_grpc//grpclog:go_default_library",
|
||||
"@org_golang_google_grpc//metadata:go_default_library",
|
||||
"@org_golang_google_grpc//status:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
size = "small",
|
||||
srcs = [
|
||||
"context_test.go",
|
||||
"convert_test.go",
|
||||
"errors_test.go",
|
||||
"fieldmask_test.go",
|
||||
"handler_test.go",
|
||||
"marshal_httpbodyproto_test.go",
|
||||
"marshal_json_test.go",
|
||||
"marshal_jsonpb_test.go",
|
||||
"marshal_proto_test.go",
|
||||
"marshaler_registry_test.go",
|
||||
"mux_test.go",
|
||||
"pattern_test.go",
|
||||
"query_test.go",
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//internal:go_default_library",
|
||||
"//runtime/internal/examplepb:go_default_library",
|
||||
"//utilities:go_default_library",
|
||||
"@com_github_golang_protobuf//jsonpb:go_default_library_gen",
|
||||
"@com_github_golang_protobuf//proto:go_default_library",
|
||||
"@com_github_golang_protobuf//ptypes:go_default_library_gen",
|
||||
"@go_googleapis//google/api:httpbody_go_proto",
|
||||
"@go_googleapis//google/rpc:errdetails_go_proto",
|
||||
"@io_bazel_rules_go//proto/wkt:duration_go_proto",
|
||||
"@io_bazel_rules_go//proto/wkt:empty_go_proto",
|
||||
"@io_bazel_rules_go//proto/wkt:field_mask_go_proto",
|
||||
"@io_bazel_rules_go//proto/wkt:struct_go_proto",
|
||||
"@io_bazel_rules_go//proto/wkt:timestamp_go_proto",
|
||||
"@io_bazel_rules_go//proto/wkt:wrappers_go_proto",
|
||||
"@org_golang_google_grpc//codes:go_default_library",
|
||||
"@org_golang_google_grpc//metadata:go_default_library",
|
||||
"@org_golang_google_grpc//status:go_default_library",
|
||||
],
|
||||
)
|
236 vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/context.go generated vendored
@@ -1,236 +0,0 @@
|
|||
package runtime
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/textproto"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/grpclog"
|
||||
"google.golang.org/grpc/metadata"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
// MetadataHeaderPrefix is the http prefix that represents custom metadata
|
||||
// parameters to or from a gRPC call.
|
||||
const MetadataHeaderPrefix = "Grpc-Metadata-"
|
||||
|
||||
// MetadataPrefix is prepended to permanent HTTP header keys (as specified
|
||||
// by the IANA) when added to the gRPC context.
|
||||
const MetadataPrefix = "grpcgateway-"
|
||||
|
||||
// MetadataTrailerPrefix is prepended to gRPC metadata as it is converted to
|
||||
// HTTP headers in a response handled by grpc-gateway
|
||||
const MetadataTrailerPrefix = "Grpc-Trailer-"
|
||||
|
||||
const metadataGrpcTimeout = "Grpc-Timeout"
|
||||
const metadataHeaderBinarySuffix = "-Bin"
|
||||
|
||||
const xForwardedFor = "X-Forwarded-For"
|
||||
const xForwardedHost = "X-Forwarded-Host"
|
||||
|
||||
var (
|
||||
// DefaultContextTimeout is used for gRPC call context.WithTimeout whenever a Grpc-Timeout inbound
|
||||
// header isn't present. If the value is 0 the sent `context` will not have a timeout.
|
||||
DefaultContextTimeout = 0 * time.Second
|
||||
)
|
||||
|
||||
func decodeBinHeader(v string) ([]byte, error) {
|
||||
if len(v)%4 == 0 {
|
||||
// Input was padded, or padding was not necessary.
|
||||
return base64.StdEncoding.DecodeString(v)
|
||||
}
|
||||
return base64.RawStdEncoding.DecodeString(v)
|
||||
}
|
||||
|
||||
/*
|
||||
AnnotateContext adds context information such as metadata from the request.
|
||||
|
||||
At a minimum, the RemoteAddr is included in the fashion of "X-Forwarded-For",
|
||||
except that the forwarded destination is not another HTTP service but rather
|
||||
a gRPC service.
|
||||
*/
|
||||
func AnnotateContext(ctx context.Context, mux *ServeMux, req *http.Request) (context.Context, error) {
|
||||
ctx, md, err := annotateContext(ctx, mux, req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if md == nil {
|
||||
return ctx, nil
|
||||
}
|
||||
|
||||
return metadata.NewOutgoingContext(ctx, md), nil
|
||||
}
|
||||
|
||||
// AnnotateIncomingContext adds context information such as metadata from the request.
|
||||
// Attach metadata as incoming context.
|
||||
func AnnotateIncomingContext(ctx context.Context, mux *ServeMux, req *http.Request) (context.Context, error) {
|
||||
ctx, md, err := annotateContext(ctx, mux, req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if md == nil {
|
||||
return ctx, nil
|
||||
}
|
||||
|
||||
return metadata.NewIncomingContext(ctx, md), nil
|
||||
}
|
||||
|
||||
func annotateContext(ctx context.Context, mux *ServeMux, req *http.Request) (context.Context, metadata.MD, error) {
|
||||
var pairs []string
|
||||
timeout := DefaultContextTimeout
|
||||
if tm := req.Header.Get(metadataGrpcTimeout); tm != "" {
|
||||
var err error
|
||||
timeout, err = timeoutDecode(tm)
|
||||
if err != nil {
|
||||
return nil, nil, status.Errorf(codes.InvalidArgument, "invalid grpc-timeout: %s", tm)
|
||||
}
|
||||
}
|
||||
|
||||
for key, vals := range req.Header {
|
||||
for _, val := range vals {
|
||||
key = textproto.CanonicalMIMEHeaderKey(key)
|
||||
// For backwards-compatibility, pass through 'authorization' header with no prefix.
|
||||
if key == "Authorization" {
|
||||
pairs = append(pairs, "authorization", val)
|
||||
}
|
||||
if h, ok := mux.incomingHeaderMatcher(key); ok {
|
||||
// Handles "-bin" metadata in grpc, since grpc will do another base64
|
||||
// encode before sending to server, we need to decode it first.
|
||||
if strings.HasSuffix(key, metadataHeaderBinarySuffix) {
|
||||
b, err := decodeBinHeader(val)
|
||||
if err != nil {
|
||||
return nil, nil, status.Errorf(codes.InvalidArgument, "invalid binary header %s: %s", key, err)
|
||||
}
|
||||
|
||||
val = string(b)
|
||||
}
|
||||
pairs = append(pairs, h, val)
|
||||
}
|
||||
}
|
||||
}
|
||||
if host := req.Header.Get(xForwardedHost); host != "" {
|
||||
pairs = append(pairs, strings.ToLower(xForwardedHost), host)
|
||||
} else if req.Host != "" {
|
||||
pairs = append(pairs, strings.ToLower(xForwardedHost), req.Host)
|
||||
}
|
||||
|
||||
if addr := req.RemoteAddr; addr != "" {
|
||||
if remoteIP, _, err := net.SplitHostPort(addr); err == nil {
|
||||
if fwd := req.Header.Get(xForwardedFor); fwd == "" {
|
||||
pairs = append(pairs, strings.ToLower(xForwardedFor), remoteIP)
|
||||
} else {
|
||||
pairs = append(pairs, strings.ToLower(xForwardedFor), fmt.Sprintf("%s, %s", fwd, remoteIP))
|
||||
}
|
||||
} else {
|
||||
grpclog.Infof("invalid remote addr: %s", addr)
|
||||
}
|
||||
}
|
||||
|
||||
if timeout != 0 {
|
||||
ctx, _ = context.WithTimeout(ctx, timeout)
|
||||
}
|
||||
if len(pairs) == 0 {
|
||||
return ctx, nil, nil
|
||||
}
|
||||
md := metadata.Pairs(pairs...)
|
||||
for _, mda := range mux.metadataAnnotators {
|
||||
md = metadata.Join(md, mda(ctx, req))
|
||||
}
|
||||
return ctx, md, nil
|
||||
}
|
||||
|
||||
// ServerMetadata consists of metadata sent from gRPC server.
|
||||
type ServerMetadata struct {
|
||||
HeaderMD metadata.MD
|
||||
TrailerMD metadata.MD
|
||||
}
|
||||
|
||||
type serverMetadataKey struct{}
|
||||
|
||||
// NewServerMetadataContext creates a new context with ServerMetadata
|
||||
func NewServerMetadataContext(ctx context.Context, md ServerMetadata) context.Context {
|
||||
return context.WithValue(ctx, serverMetadataKey{}, md)
|
||||
}
|
||||
|
||||
// ServerMetadataFromContext returns the ServerMetadata in ctx
|
||||
func ServerMetadataFromContext(ctx context.Context) (md ServerMetadata, ok bool) {
|
||||
md, ok = ctx.Value(serverMetadataKey{}).(ServerMetadata)
|
||||
return
|
||||
}
|
||||
|
||||
func timeoutDecode(s string) (time.Duration, error) {
|
||||
size := len(s)
|
||||
if size < 2 {
|
||||
return 0, fmt.Errorf("timeout string is too short: %q", s)
|
||||
}
|
||||
d, ok := timeoutUnitToDuration(s[size-1])
|
||||
if !ok {
|
||||
return 0, fmt.Errorf("timeout unit is not recognized: %q", s)
|
||||
}
|
||||
t, err := strconv.ParseInt(s[:size-1], 10, 64)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return d * time.Duration(t), nil
|
||||
}
|
||||
|
||||
func timeoutUnitToDuration(u uint8) (d time.Duration, ok bool) {
|
||||
switch u {
|
||||
case 'H':
|
||||
return time.Hour, true
|
||||
case 'M':
|
||||
return time.Minute, true
|
||||
case 'S':
|
||||
return time.Second, true
|
||||
case 'm':
|
||||
return time.Millisecond, true
|
||||
case 'u':
|
||||
return time.Microsecond, true
|
||||
case 'n':
|
||||
return time.Nanosecond, true
|
||||
default:
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// isPermanentHTTPHeader checks whether hdr belongs to the list of
|
||||
// permanent request headers maintained by IANA.
|
||||
// http://www.iana.org/assignments/message-headers/message-headers.xml
|
||||
func isPermanentHTTPHeader(hdr string) bool {
|
||||
switch hdr {
|
||||
case
|
||||
"Accept",
|
||||
"Accept-Charset",
|
||||
"Accept-Language",
|
||||
"Accept-Ranges",
|
||||
"Authorization",
|
||||
"Cache-Control",
|
||||
"Content-Type",
|
||||
"Cookie",
|
||||
"Date",
|
||||
"Expect",
|
||||
"From",
|
||||
"Host",
|
||||
"If-Match",
|
||||
"If-Modified-Since",
|
||||
"If-None-Match",
|
||||
"If-Schedule-Tag-Match",
|
||||
"If-Unmodified-Since",
|
||||
"Max-Forwards",
|
||||
"Origin",
|
||||
"Pragma",
|
||||
"Referer",
|
||||
"User-Agent",
|
||||
"Via",
|
||||
"Warning":
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
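The Grpc-Timeout header parsed above is a numeric value followed by a single unit suffix (H, M, S, m, u, n). The decoding helpers are unexported, so the sketch below re-implements the same mapping purely as an illustration of the header format; it is not part of the package's API.

package main

import (
	"fmt"
	"strconv"
	"time"
)

// decodeGrpcTimeout mirrors the unexported timeoutDecode/timeoutUnitToDuration
// logic above, only to show what a "Grpc-Timeout" header value means.
func decodeGrpcTimeout(s string) (time.Duration, error) {
	if len(s) < 2 {
		return 0, fmt.Errorf("timeout string is too short: %q", s)
	}
	units := map[byte]time.Duration{
		'H': time.Hour, 'M': time.Minute, 'S': time.Second,
		'm': time.Millisecond, 'u': time.Microsecond, 'n': time.Nanosecond,
	}
	d, ok := units[s[len(s)-1]]
	if !ok {
		return 0, fmt.Errorf("timeout unit is not recognized: %q", s)
	}
	t, err := strconv.ParseInt(s[:len(s)-1], 10, 64)
	if err != nil {
		return 0, err
	}
	return d * time.Duration(t), nil
}

func main() {
	d, _ := decodeGrpcTimeout("100m") // a header of "100m" means 100 milliseconds
	fmt.Println(d)                    // 100ms
}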
318 vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/convert.go generated vendored
@@ -1,318 +0,0 @@
|
|||
package runtime
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/golang/protobuf/jsonpb"
|
||||
"github.com/golang/protobuf/ptypes/duration"
|
||||
"github.com/golang/protobuf/ptypes/timestamp"
|
||||
"github.com/golang/protobuf/ptypes/wrappers"
|
||||
)
|
||||
|
||||
// String just returns the given string.
|
||||
// It is just for compatibility to other types.
|
||||
func String(val string) (string, error) {
|
||||
return val, nil
|
||||
}
|
||||
|
||||
// StringSlice converts 'val' where individual strings are separated by
|
||||
// 'sep' into a string slice.
|
||||
func StringSlice(val, sep string) ([]string, error) {
|
||||
return strings.Split(val, sep), nil
|
||||
}
|
||||
|
||||
// Bool converts the given string representation of a boolean value into bool.
|
||||
func Bool(val string) (bool, error) {
|
||||
return strconv.ParseBool(val)
|
||||
}
|
||||
|
||||
// BoolSlice converts 'val' where individual booleans are separated by
|
||||
// 'sep' into a bool slice.
|
||||
func BoolSlice(val, sep string) ([]bool, error) {
|
||||
s := strings.Split(val, sep)
|
||||
values := make([]bool, len(s))
|
||||
for i, v := range s {
|
||||
value, err := Bool(v)
|
||||
if err != nil {
|
||||
return values, err
|
||||
}
|
||||
values[i] = value
|
||||
}
|
||||
return values, nil
|
||||
}
|
||||
|
||||
// Float64 converts the given string representation into representation of a floating point number into float64.
|
||||
func Float64(val string) (float64, error) {
|
||||
return strconv.ParseFloat(val, 64)
|
||||
}
|
||||
|
||||
// Float64Slice converts 'val' where individual floating point numbers are separated by
|
||||
// 'sep' into a float64 slice.
|
||||
func Float64Slice(val, sep string) ([]float64, error) {
|
||||
s := strings.Split(val, sep)
|
||||
values := make([]float64, len(s))
|
||||
for i, v := range s {
|
||||
value, err := Float64(v)
|
||||
if err != nil {
|
||||
return values, err
|
||||
}
|
||||
values[i] = value
|
||||
}
|
||||
return values, nil
|
||||
}
|
||||
|
||||
// Float32 converts the given string representation of a floating point number into float32.
|
||||
func Float32(val string) (float32, error) {
|
||||
f, err := strconv.ParseFloat(val, 32)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return float32(f), nil
|
||||
}
|
||||
|
||||
// Float32Slice converts 'val' where individual floating point numbers are separated by
|
||||
// 'sep' into a float32 slice.
|
||||
func Float32Slice(val, sep string) ([]float32, error) {
|
||||
s := strings.Split(val, sep)
|
||||
values := make([]float32, len(s))
|
||||
for i, v := range s {
|
||||
value, err := Float32(v)
|
||||
if err != nil {
|
||||
return values, err
|
||||
}
|
||||
values[i] = value
|
||||
}
|
||||
return values, nil
|
||||
}
|
||||
|
||||
// Int64 converts the given string representation of an integer into int64.
|
||||
func Int64(val string) (int64, error) {
|
||||
return strconv.ParseInt(val, 0, 64)
|
||||
}
|
||||
|
||||
// Int64Slice converts 'val' where individual integers are separated by
|
||||
// 'sep' into a int64 slice.
|
||||
func Int64Slice(val, sep string) ([]int64, error) {
|
||||
s := strings.Split(val, sep)
|
||||
values := make([]int64, len(s))
|
||||
for i, v := range s {
|
||||
value, err := Int64(v)
|
||||
if err != nil {
|
||||
return values, err
|
||||
}
|
||||
values[i] = value
|
||||
}
|
||||
return values, nil
|
||||
}
|
||||
|
||||
// Int32 converts the given string representation of an integer into int32.
|
||||
func Int32(val string) (int32, error) {
|
||||
i, err := strconv.ParseInt(val, 0, 32)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return int32(i), nil
|
||||
}
|
||||
|
||||
// Int32Slice converts 'val' where individual integers are separated by
|
||||
// 'sep' into a int32 slice.
|
||||
func Int32Slice(val, sep string) ([]int32, error) {
|
||||
s := strings.Split(val, sep)
|
||||
values := make([]int32, len(s))
|
||||
for i, v := range s {
|
||||
value, err := Int32(v)
|
||||
if err != nil {
|
||||
return values, err
|
||||
}
|
||||
values[i] = value
|
||||
}
|
||||
return values, nil
|
||||
}
|
||||
|
||||
// Uint64 converts the given string representation of an integer into uint64.
|
||||
func Uint64(val string) (uint64, error) {
|
||||
return strconv.ParseUint(val, 0, 64)
|
||||
}
|
||||
|
||||
// Uint64Slice converts 'val' where individual integers are separated by
|
||||
// 'sep' into a uint64 slice.
|
||||
func Uint64Slice(val, sep string) ([]uint64, error) {
|
||||
s := strings.Split(val, sep)
|
||||
values := make([]uint64, len(s))
|
||||
for i, v := range s {
|
||||
value, err := Uint64(v)
|
||||
if err != nil {
|
||||
return values, err
|
||||
}
|
||||
values[i] = value
|
||||
}
|
||||
return values, nil
|
||||
}
|
||||
|
||||
// Uint32 converts the given string representation of an integer into uint32.
|
||||
func Uint32(val string) (uint32, error) {
|
||||
i, err := strconv.ParseUint(val, 0, 32)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return uint32(i), nil
|
||||
}
|
||||
|
||||
// Uint32Slice converts 'val' where individual integers are separated by
|
||||
// 'sep' into a uint32 slice.
|
||||
func Uint32Slice(val, sep string) ([]uint32, error) {
|
||||
s := strings.Split(val, sep)
|
||||
values := make([]uint32, len(s))
|
||||
for i, v := range s {
|
||||
value, err := Uint32(v)
|
||||
if err != nil {
|
||||
return values, err
|
||||
}
|
||||
values[i] = value
|
||||
}
|
||||
return values, nil
|
||||
}
|
||||
|
||||
// Bytes converts the given string representation of a byte sequence into a slice of bytes
|
||||
// A bytes sequence is encoded in URL-safe base64 without padding
|
||||
func Bytes(val string) ([]byte, error) {
|
||||
b, err := base64.StdEncoding.DecodeString(val)
|
||||
if err != nil {
|
||||
b, err = base64.URLEncoding.DecodeString(val)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return b, nil
|
||||
}
|
||||
|
||||
// BytesSlice converts 'val' where individual bytes sequences, encoded in URL-safe
|
||||
// base64 without padding, are separated by 'sep' into a slice of bytes slices slice.
|
||||
func BytesSlice(val, sep string) ([][]byte, error) {
|
||||
s := strings.Split(val, sep)
|
||||
values := make([][]byte, len(s))
|
||||
for i, v := range s {
|
||||
value, err := Bytes(v)
|
||||
if err != nil {
|
||||
return values, err
|
||||
}
|
||||
values[i] = value
|
||||
}
|
||||
return values, nil
|
||||
}
|
||||
|
||||
// Timestamp converts the given RFC3339 formatted string into a timestamp.Timestamp.
|
||||
func Timestamp(val string) (*timestamp.Timestamp, error) {
|
||||
var r timestamp.Timestamp
|
||||
err := jsonpb.UnmarshalString(val, &r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &r, nil
|
||||
}
|
||||
|
||||
// Duration converts the given string into a timestamp.Duration.
|
||||
func Duration(val string) (*duration.Duration, error) {
|
||||
var r duration.Duration
|
||||
err := jsonpb.UnmarshalString(val, &r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &r, nil
|
||||
}
|
||||
|
||||
// Enum converts the given string into an int32 that should be type casted into the
|
||||
// correct enum proto type.
|
||||
func Enum(val string, enumValMap map[string]int32) (int32, error) {
|
||||
e, ok := enumValMap[val]
|
||||
if ok {
|
||||
return e, nil
|
||||
}
|
||||
|
||||
i, err := Int32(val)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("%s is not valid", val)
|
||||
}
|
||||
for _, v := range enumValMap {
|
||||
if v == i {
|
||||
return i, nil
|
||||
}
|
||||
}
|
||||
return 0, fmt.Errorf("%s is not valid", val)
|
||||
}
|
||||
|
||||
// EnumSlice converts 'val' where individual enums are separated by 'sep'
|
||||
// into a int32 slice. Each individual int32 should be type casted into the
|
||||
// correct enum proto type.
|
||||
func EnumSlice(val, sep string, enumValMap map[string]int32) ([]int32, error) {
|
||||
s := strings.Split(val, sep)
|
||||
values := make([]int32, len(s))
|
||||
for i, v := range s {
|
||||
value, err := Enum(v, enumValMap)
|
||||
if err != nil {
|
||||
return values, err
|
||||
}
|
||||
values[i] = value
|
||||
}
|
||||
return values, nil
|
||||
}
|
||||
|
||||
/*
|
||||
Support fot google.protobuf.wrappers on top of primitive types
|
||||
*/
|
||||
|
||||
// StringValue well-known type support as wrapper around string type
|
||||
func StringValue(val string) (*wrappers.StringValue, error) {
|
||||
return &wrappers.StringValue{Value: val}, nil
|
||||
}
|
||||
|
||||
// FloatValue well-known type support as wrapper around float32 type
|
||||
func FloatValue(val string) (*wrappers.FloatValue, error) {
|
||||
parsedVal, err := Float32(val)
|
||||
return &wrappers.FloatValue{Value: parsedVal}, err
|
||||
}
|
||||
|
||||
// DoubleValue well-known type support as wrapper around float64 type
|
||||
func DoubleValue(val string) (*wrappers.DoubleValue, error) {
|
||||
parsedVal, err := Float64(val)
|
||||
return &wrappers.DoubleValue{Value: parsedVal}, err
|
||||
}
|
||||
|
||||
// BoolValue well-known type support as wrapper around bool type
|
||||
func BoolValue(val string) (*wrappers.BoolValue, error) {
|
||||
parsedVal, err := Bool(val)
|
||||
return &wrappers.BoolValue{Value: parsedVal}, err
|
||||
}
|
||||
|
||||
// Int32Value well-known type support as wrapper around int32 type
|
||||
func Int32Value(val string) (*wrappers.Int32Value, error) {
|
||||
parsedVal, err := Int32(val)
|
||||
return &wrappers.Int32Value{Value: parsedVal}, err
|
||||
}
|
||||
|
||||
// UInt32Value well-known type support as wrapper around uint32 type
|
||||
func UInt32Value(val string) (*wrappers.UInt32Value, error) {
|
||||
parsedVal, err := Uint32(val)
|
||||
return &wrappers.UInt32Value{Value: parsedVal}, err
|
||||
}
|
||||
|
||||
// Int64Value well-known type support as wrapper around int64 type
|
||||
func Int64Value(val string) (*wrappers.Int64Value, error) {
|
||||
parsedVal, err := Int64(val)
|
||||
return &wrappers.Int64Value{Value: parsedVal}, err
|
||||
}
|
||||
|
||||
// UInt64Value well-known type support as wrapper around uint64 type
|
||||
func UInt64Value(val string) (*wrappers.UInt64Value, error) {
|
||||
parsedVal, err := Uint64(val)
|
||||
return &wrappers.UInt64Value{Value: parsedVal}, err
|
||||
}
|
||||
|
||||
// BytesValue well-known type support as wrapper around bytes[] type
|
||||
func BytesValue(val string) (*wrappers.BytesValue, error) {
|
||||
parsedVal, err := Bytes(val)
|
||||
return &wrappers.BytesValue{Value: parsedVal}, err
|
||||
}
|
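These conversion helpers are what generated gateway handlers use to turn query and path parameter strings into typed values. A small usage sketch against the exported functions defined above (assuming the vendored runtime package is importable):

package main

import (
	"fmt"

	"github.com/grpc-ecosystem/grpc-gateway/runtime"
)

func main() {
	ok, _ := runtime.Bool("true")
	ids, _ := runtime.Int32Slice("1,2,3", ",")
	d, _ := runtime.Duration(`"90s"`) // jsonpb expects the quoted JSON string form
	fmt.Println(ok, ids, d.GetSeconds()) // true [1 2 3] 90
}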
5 vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/doc.go generated vendored
@@ -1,5 +0,0 @@
/*
Package runtime contains runtime helper functions used by
servers which protoc-gen-grpc-gateway generates.
*/
package runtime
169 vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/errors.go generated vendored
@@ -1,169 +0,0 @@
|
|||
package runtime
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"net/http"
|
||||
|
||||
"github.com/grpc-ecosystem/grpc-gateway/internal"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/grpclog"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
// HTTPStatusFromCode converts a gRPC error code into the corresponding HTTP response status.
|
||||
// See: https://github.com/googleapis/googleapis/blob/master/google/rpc/code.proto
|
||||
func HTTPStatusFromCode(code codes.Code) int {
|
||||
switch code {
|
||||
case codes.OK:
|
||||
return http.StatusOK
|
||||
case codes.Canceled:
|
||||
return http.StatusRequestTimeout
|
||||
case codes.Unknown:
|
||||
return http.StatusInternalServerError
|
||||
case codes.InvalidArgument:
|
||||
return http.StatusBadRequest
|
||||
case codes.DeadlineExceeded:
|
||||
return http.StatusGatewayTimeout
|
||||
case codes.NotFound:
|
||||
return http.StatusNotFound
|
||||
case codes.AlreadyExists:
|
||||
return http.StatusConflict
|
||||
case codes.PermissionDenied:
|
||||
return http.StatusForbidden
|
||||
case codes.Unauthenticated:
|
||||
return http.StatusUnauthorized
|
||||
case codes.ResourceExhausted:
|
||||
return http.StatusTooManyRequests
|
||||
case codes.FailedPrecondition:
|
||||
// Note, this deliberately doesn't translate to the similarly named '412 Precondition Failed' HTTP response status.
|
||||
return http.StatusBadRequest
|
||||
case codes.Aborted:
|
||||
return http.StatusConflict
|
||||
case codes.OutOfRange:
|
||||
return http.StatusBadRequest
|
||||
case codes.Unimplemented:
|
||||
return http.StatusNotImplemented
|
||||
case codes.Internal:
|
||||
return http.StatusInternalServerError
|
||||
case codes.Unavailable:
|
||||
return http.StatusServiceUnavailable
|
||||
case codes.DataLoss:
|
||||
return http.StatusInternalServerError
|
||||
}
|
||||
|
||||
grpclog.Infof("Unknown gRPC error code: %v", code)
|
||||
return http.StatusInternalServerError
|
||||
}
|
||||
|
||||
var (
|
||||
// HTTPError replies to the request with an error.
|
||||
//
|
||||
// HTTPError is called:
|
||||
// - From generated per-endpoint gateway handler code, when calling the backend results in an error.
|
||||
// - From gateway runtime code, when forwarding the response message results in an error.
|
||||
//
|
||||
// The default value for HTTPError calls the custom error handler configured on the ServeMux via the
|
||||
// WithProtoErrorHandler serve option if that option was used, calling GlobalHTTPErrorHandler otherwise.
|
||||
//
|
||||
// To customize the error handling of a particular ServeMux instance, use the WithProtoErrorHandler
|
||||
// serve option.
|
||||
//
|
||||
// To customize the error format for all ServeMux instances not using the WithProtoErrorHandler serve
|
||||
// option, set GlobalHTTPErrorHandler to a custom function.
|
||||
//
|
||||
// Setting this variable directly to customize error format is deprecated.
|
||||
HTTPError = MuxOrGlobalHTTPError
|
||||
|
||||
// GlobalHTTPErrorHandler is the HTTPError handler for all ServeMux instances not using the
|
||||
// WithProtoErrorHandler serve option.
|
||||
//
|
||||
// You can set a custom function to this variable to customize error format.
|
||||
GlobalHTTPErrorHandler = DefaultHTTPError
|
||||
|
||||
// OtherErrorHandler handles gateway errors from parsing and routing client requests for all
|
||||
// ServeMux instances not using the WithProtoErrorHandler serve option.
|
||||
//
|
||||
// It returns the following error codes: StatusMethodNotAllowed StatusNotFound StatusBadRequest
|
||||
//
|
||||
// To customize parsing and routing error handling of a particular ServeMux instance, use the
|
||||
// WithProtoErrorHandler serve option.
|
||||
//
|
||||
// To customize parsing and routing error handling of all ServeMux instances not using the
|
||||
// WithProtoErrorHandler serve option, set a custom function to this variable.
|
||||
OtherErrorHandler = DefaultOtherErrorHandler
|
||||
)
|
||||
|
||||
// MuxOrGlobalHTTPError uses the mux-configured error handler, falling back to GlobalErrorHandler.
|
||||
func MuxOrGlobalHTTPError(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, r *http.Request, err error) {
|
||||
if mux.protoErrorHandler != nil {
|
||||
mux.protoErrorHandler(ctx, mux, marshaler, w, r, err)
|
||||
} else {
|
||||
GlobalHTTPErrorHandler(ctx, mux, marshaler, w, r, err)
|
||||
}
|
||||
}
|
||||
|
||||
// DefaultHTTPError is the default implementation of HTTPError.
|
||||
// If "err" is an error from gRPC system, the function replies with the status code mapped by HTTPStatusFromCode.
|
||||
// If otherwise, it replies with http.StatusInternalServerError.
|
||||
//
|
||||
// The response body returned by this function is a JSON object,
|
||||
// which contains a member whose key is "error" and whose value is err.Error().
|
||||
func DefaultHTTPError(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, _ *http.Request, err error) {
|
||||
const fallback = `{"error": "failed to marshal error message"}`
|
||||
|
||||
s, ok := status.FromError(err)
|
||||
if !ok {
|
||||
s = status.New(codes.Unknown, err.Error())
|
||||
}
|
||||
|
||||
w.Header().Del("Trailer")
|
||||
|
||||
contentType := marshaler.ContentType()
|
||||
// Check marshaler on run time in order to keep backwards compatibility
|
||||
// An interface param needs to be added to the ContentType() function on
|
||||
// the Marshal interface to be able to remove this check
|
||||
if typeMarshaler, ok := marshaler.(contentTypeMarshaler); ok {
|
||||
pb := s.Proto()
|
||||
contentType = typeMarshaler.ContentTypeFromMessage(pb)
|
||||
}
|
||||
w.Header().Set("Content-Type", contentType)
|
||||
|
||||
body := &internal.Error{
|
||||
Error: s.Message(),
|
||||
Message: s.Message(),
|
||||
Code: int32(s.Code()),
|
||||
Details: s.Proto().GetDetails(),
|
||||
}
|
||||
|
||||
buf, merr := marshaler.Marshal(body)
|
||||
if merr != nil {
|
||||
grpclog.Infof("Failed to marshal error message %q: %v", body, merr)
|
||||
w.WriteHeader(http.StatusInternalServerError)
|
||||
if _, err := io.WriteString(w, fallback); err != nil {
|
||||
grpclog.Infof("Failed to write response: %v", err)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
md, ok := ServerMetadataFromContext(ctx)
|
||||
if !ok {
|
||||
grpclog.Infof("Failed to extract ServerMetadata from context")
|
||||
}
|
||||
|
||||
handleForwardResponseServerMetadata(w, mux, md)
|
||||
handleForwardResponseTrailerHeader(w, md)
|
||||
st := HTTPStatusFromCode(s.Code())
|
||||
w.WriteHeader(st)
|
||||
if _, err := w.Write(buf); err != nil {
|
||||
grpclog.Infof("Failed to write response: %v", err)
|
||||
}
|
||||
|
||||
handleForwardResponseTrailer(w, md)
|
||||
}
|
||||
|
||||
// DefaultOtherErrorHandler is the default implementation of OtherErrorHandler.
|
||||
// It simply writes a string representation of the given error into "w".
|
||||
func DefaultOtherErrorHandler(w http.ResponseWriter, _ *http.Request, msg string, code int) {
|
||||
http.Error(w, msg, code)
|
||||
}
|
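HTTPStatusFromCode is exported, so the gRPC-to-HTTP status mapping above can be checked directly. A brief sketch:

package main

import (
	"fmt"
	"net/http"

	"github.com/grpc-ecosystem/grpc-gateway/runtime"
	"google.golang.org/grpc/codes"
)

func main() {
	fmt.Println(runtime.HTTPStatusFromCode(codes.NotFound) == http.StatusNotFound) // true
	// FailedPrecondition deliberately maps to 400, not 412 (see the comment above).
	fmt.Println(runtime.HTTPStatusFromCode(codes.FailedPrecondition)) // 400
}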
82 vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/fieldmask.go generated vendored
@@ -1,82 +0,0 @@
|
|||
package runtime
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"io"
|
||||
"strings"
|
||||
|
||||
descriptor2 "github.com/golang/protobuf/descriptor"
|
||||
"github.com/golang/protobuf/protoc-gen-go/descriptor"
|
||||
"google.golang.org/genproto/protobuf/field_mask"
|
||||
)
|
||||
|
||||
func translateName(name string, md *descriptor.DescriptorProto) (string, *descriptor.DescriptorProto) {
|
||||
// TODO - should really gate this with a test that the marshaller has used json names
|
||||
if md != nil {
|
||||
for _, f := range md.Field {
|
||||
if f.JsonName != nil && f.Name != nil && *f.JsonName == name {
|
||||
var subType *descriptor.DescriptorProto
|
||||
|
||||
// If the field has a TypeName then we retrieve the nested type for translating the embedded message names.
|
||||
if f.TypeName != nil {
|
||||
typeSplit := strings.Split(*f.TypeName, ".")
|
||||
typeName := typeSplit[len(typeSplit)-1]
|
||||
for _, t := range md.NestedType {
|
||||
if typeName == *t.Name {
|
||||
subType = t
|
||||
}
|
||||
}
|
||||
}
|
||||
return *f.Name, subType
|
||||
}
|
||||
}
|
||||
}
|
||||
return name, nil
|
||||
}
|
||||
|
||||
// FieldMaskFromRequestBody creates a FieldMask printing all complete paths from the JSON body.
|
||||
func FieldMaskFromRequestBody(r io.Reader, md *descriptor.DescriptorProto) (*field_mask.FieldMask, error) {
|
||||
fm := &field_mask.FieldMask{}
|
||||
var root interface{}
|
||||
if err := json.NewDecoder(r).Decode(&root); err != nil {
|
||||
if err == io.EOF {
|
||||
return fm, nil
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
queue := []fieldMaskPathItem{{node: root, md: md}}
|
||||
for len(queue) > 0 {
|
||||
// dequeue an item
|
||||
item := queue[0]
|
||||
queue = queue[1:]
|
||||
|
||||
if m, ok := item.node.(map[string]interface{}); ok {
|
||||
// if the item is an object, then enqueue all of its children
|
||||
for k, v := range m {
|
||||
protoName, subMd := translateName(k, item.md)
|
||||
if subMsg, ok := v.(descriptor2.Message); ok {
|
||||
_, subMd = descriptor2.ForMessage(subMsg)
|
||||
}
|
||||
queue = append(queue, fieldMaskPathItem{path: append(item.path, protoName), node: v, md: subMd})
|
||||
}
|
||||
} else if len(item.path) > 0 {
|
||||
// otherwise, it's a leaf node so print its path
|
||||
fm.Paths = append(fm.Paths, strings.Join(item.path, "."))
|
||||
}
|
||||
}
|
||||
|
||||
return fm, nil
|
||||
}
|
||||
|
||||
// fieldMaskPathItem stores a in-progress deconstruction of a path for a fieldmask
|
||||
type fieldMaskPathItem struct {
|
||||
// the list of prior fields leading up to node
|
||||
path []string
|
||||
|
||||
// a generic decoded json object the current item to inspect for further path extraction
|
||||
node interface{}
|
||||
|
||||
// descriptor for parent message
|
||||
md *descriptor.DescriptorProto
|
||||
}
|
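FieldMaskFromRequestBody walks the decoded JSON and emits one dotted path per leaf. A sketch, passing a nil descriptor so no JSON-name translation happens:

package main

import (
	"fmt"
	"strings"

	"github.com/grpc-ecosystem/grpc-gateway/runtime"
)

func main() {
	body := strings.NewReader(`{"name": "demo", "spec": {"replicas": 3}}`)
	fm, err := runtime.FieldMaskFromRequestBody(body, nil)
	if err != nil {
		panic(err)
	}
	// Map iteration order is not fixed, so the order of paths may vary.
	fmt.Println(fm.Paths) // e.g. [name spec.replicas]
}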
212 vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/handler.go generated vendored
@@ -1,212 +0,0 @@
|
|||
package runtime
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/textproto"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
"github.com/grpc-ecosystem/grpc-gateway/internal"
|
||||
"google.golang.org/grpc/grpclog"
|
||||
)
|
||||
|
||||
var errEmptyResponse = errors.New("empty response")
|
||||
|
||||
// ForwardResponseStream forwards the stream from gRPC server to REST client.
|
||||
func ForwardResponseStream(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, req *http.Request, recv func() (proto.Message, error), opts ...func(context.Context, http.ResponseWriter, proto.Message) error) {
|
||||
f, ok := w.(http.Flusher)
|
||||
if !ok {
|
||||
grpclog.Infof("Flush not supported in %T", w)
|
||||
http.Error(w, "unexpected type of web server", http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
md, ok := ServerMetadataFromContext(ctx)
|
||||
if !ok {
|
||||
grpclog.Infof("Failed to extract ServerMetadata from context")
|
||||
http.Error(w, "unexpected error", http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
handleForwardResponseServerMetadata(w, mux, md)
|
||||
|
||||
w.Header().Set("Transfer-Encoding", "chunked")
|
||||
w.Header().Set("Content-Type", marshaler.ContentType())
|
||||
if err := handleForwardResponseOptions(ctx, w, nil, opts); err != nil {
|
||||
HTTPError(ctx, mux, marshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
|
||||
var delimiter []byte
|
||||
if d, ok := marshaler.(Delimited); ok {
|
||||
delimiter = d.Delimiter()
|
||||
} else {
|
||||
delimiter = []byte("\n")
|
||||
}
|
||||
|
||||
var wroteHeader bool
|
||||
for {
|
||||
resp, err := recv()
|
||||
if err == io.EOF {
|
||||
return
|
||||
}
|
||||
if err != nil {
|
||||
handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err)
|
||||
return
|
||||
}
|
||||
if err := handleForwardResponseOptions(ctx, w, resp, opts); err != nil {
|
||||
handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err)
|
||||
return
|
||||
}
|
||||
|
||||
var buf []byte
|
||||
switch {
|
||||
case resp == nil:
|
||||
buf, err = marshaler.Marshal(errorChunk(streamError(ctx, mux.streamErrorHandler, errEmptyResponse)))
|
||||
default:
|
||||
result := map[string]interface{}{"result": resp}
|
||||
if rb, ok := resp.(responseBody); ok {
|
||||
result["result"] = rb.XXX_ResponseBody()
|
||||
}
|
||||
|
||||
buf, err = marshaler.Marshal(result)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
grpclog.Infof("Failed to marshal response chunk: %v", err)
|
||||
handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err)
|
||||
return
|
||||
}
|
||||
if _, err = w.Write(buf); err != nil {
|
||||
grpclog.Infof("Failed to send response chunk: %v", err)
|
||||
return
|
||||
}
|
||||
wroteHeader = true
|
||||
if _, err = w.Write(delimiter); err != nil {
|
||||
grpclog.Infof("Failed to send delimiter chunk: %v", err)
|
||||
return
|
||||
}
|
||||
f.Flush()
|
||||
}
|
||||
}
|
||||
|
||||
func handleForwardResponseServerMetadata(w http.ResponseWriter, mux *ServeMux, md ServerMetadata) {
|
||||
for k, vs := range md.HeaderMD {
|
||||
if h, ok := mux.outgoingHeaderMatcher(k); ok {
|
||||
for _, v := range vs {
|
||||
w.Header().Add(h, v)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func handleForwardResponseTrailerHeader(w http.ResponseWriter, md ServerMetadata) {
|
||||
for k := range md.TrailerMD {
|
||||
tKey := textproto.CanonicalMIMEHeaderKey(fmt.Sprintf("%s%s", MetadataTrailerPrefix, k))
|
||||
w.Header().Add("Trailer", tKey)
|
||||
}
|
||||
}
|
||||
|
||||
func handleForwardResponseTrailer(w http.ResponseWriter, md ServerMetadata) {
|
||||
for k, vs := range md.TrailerMD {
|
||||
tKey := fmt.Sprintf("%s%s", MetadataTrailerPrefix, k)
|
||||
for _, v := range vs {
|
||||
w.Header().Add(tKey, v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// responseBody interface contains method for getting field for marshaling to the response body
|
||||
// this method is generated for response struct from the value of `response_body` in the `google.api.HttpRule`
|
||||
type responseBody interface {
|
||||
XXX_ResponseBody() interface{}
|
||||
}
|
||||
|
||||
// ForwardResponseMessage forwards the message "resp" from gRPC server to REST client.
|
||||
func ForwardResponseMessage(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, req *http.Request, resp proto.Message, opts ...func(context.Context, http.ResponseWriter, proto.Message) error) {
|
||||
md, ok := ServerMetadataFromContext(ctx)
|
||||
if !ok {
|
||||
grpclog.Infof("Failed to extract ServerMetadata from context")
|
||||
}
|
||||
|
||||
handleForwardResponseServerMetadata(w, mux, md)
|
||||
handleForwardResponseTrailerHeader(w, md)
|
||||
|
||||
contentType := marshaler.ContentType()
|
||||
// Check marshaler on run time in order to keep backwards compatibility
|
||||
// An interface param needs to be added to the ContentType() function on
|
||||
// the Marshal interface to be able to remove this check
|
||||
if typeMarshaler, ok := marshaler.(contentTypeMarshaler); ok {
|
||||
contentType = typeMarshaler.ContentTypeFromMessage(resp)
|
||||
}
|
||||
w.Header().Set("Content-Type", contentType)
|
||||
|
||||
if err := handleForwardResponseOptions(ctx, w, resp, opts); err != nil {
|
||||
HTTPError(ctx, mux, marshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
var buf []byte
|
||||
var err error
|
||||
if rb, ok := resp.(responseBody); ok {
|
||||
buf, err = marshaler.Marshal(rb.XXX_ResponseBody())
|
||||
} else {
|
||||
buf, err = marshaler.Marshal(resp)
|
||||
}
|
||||
if err != nil {
|
||||
grpclog.Infof("Marshal error: %v", err)
|
||||
HTTPError(ctx, mux, marshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
|
||||
if _, err = w.Write(buf); err != nil {
|
||||
grpclog.Infof("Failed to write response: %v", err)
|
||||
}
|
||||
|
||||
handleForwardResponseTrailer(w, md)
|
||||
}
|
||||
|
||||
func handleForwardResponseOptions(ctx context.Context, w http.ResponseWriter, resp proto.Message, opts []func(context.Context, http.ResponseWriter, proto.Message) error) error {
|
||||
if len(opts) == 0 {
|
||||
return nil
|
||||
}
|
||||
for _, opt := range opts {
|
||||
if err := opt(ctx, w, resp); err != nil {
|
||||
grpclog.Infof("Error handling ForwardResponseOptions: %v", err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func handleForwardResponseStreamError(ctx context.Context, wroteHeader bool, marshaler Marshaler, w http.ResponseWriter, req *http.Request, mux *ServeMux, err error) {
|
||||
serr := streamError(ctx, mux.streamErrorHandler, err)
|
||||
if !wroteHeader {
|
||||
w.WriteHeader(int(serr.HttpCode))
|
||||
}
|
||||
buf, merr := marshaler.Marshal(errorChunk(serr))
|
||||
if merr != nil {
|
||||
grpclog.Infof("Failed to marshal an error: %v", merr)
|
||||
return
|
||||
}
|
||||
if _, werr := w.Write(buf); werr != nil {
|
||||
grpclog.Infof("Failed to notify error to client: %v", werr)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// streamError returns the payload for the final message in a response stream
|
||||
// that represents the given err.
|
||||
func streamError(ctx context.Context, errHandler StreamErrorHandlerFunc, err error) *StreamError {
|
||||
serr := errHandler(ctx, err)
|
||||
if serr != nil {
|
||||
return serr
|
||||
}
|
||||
// TODO: log about misbehaving stream error handler?
|
||||
return DefaultHTTPStreamErrorHandler(ctx, err)
|
||||
}
|
||||
|
||||
func errorChunk(err *StreamError) map[string]proto.Message {
|
||||
return map[string]proto.Message{"error": (*internal.StreamError)(err)}
|
||||
}
|
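ForwardResponseStream writes one JSON object per delimiter (a newline by default), wrapping each message in "result" and a terminal error in "error". Below is a hedged sketch of what a client-side reader of such a body could look like; the sample body and its error field names are illustrative, not copied from the wire format.

package main

import (
	"bufio"
	"encoding/json"
	"fmt"
	"strings"
)

func main() {
	// Hypothetical newline-delimited body of a server-streaming call, in the
	// {"result": ...} / {"error": ...} envelope produced above.
	body := `{"result":{"value":"a"}}
{"result":{"value":"b"}}
{"error":{"message":"stream aborted"}}`

	sc := bufio.NewScanner(strings.NewReader(body))
	for sc.Scan() {
		var chunk map[string]json.RawMessage
		if err := json.Unmarshal(sc.Bytes(), &chunk); err != nil {
			panic(err)
		}
		if msg, ok := chunk["result"]; ok {
			fmt.Println("message:", string(msg))
		} else {
			fmt.Println("error:", string(chunk["error"]))
		}
	}
}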
43 vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_httpbodyproto.go generated vendored
@@ -1,43 +0,0 @@
|
|||
package runtime
|
||||
|
||||
import (
|
||||
"google.golang.org/genproto/googleapis/api/httpbody"
|
||||
)
|
||||
|
||||
// SetHTTPBodyMarshaler overwrite the default marshaler with the HTTPBodyMarshaler
|
||||
func SetHTTPBodyMarshaler(serveMux *ServeMux) {
|
||||
serveMux.marshalers.mimeMap[MIMEWildcard] = &HTTPBodyMarshaler{
|
||||
Marshaler: &JSONPb{OrigName: true},
|
||||
}
|
||||
}
|
||||
|
||||
// HTTPBodyMarshaler is a Marshaler which supports marshaling of a
|
||||
// google.api.HttpBody message as the full response body if it is
|
||||
// the actual message used as the response. If not, then this will
|
||||
// simply fallback to the Marshaler specified as its default Marshaler.
|
||||
type HTTPBodyMarshaler struct {
|
||||
Marshaler
|
||||
}
|
||||
|
||||
// ContentType implementation to keep backwards compatibility with marshal interface
|
||||
func (h *HTTPBodyMarshaler) ContentType() string {
|
||||
return h.ContentTypeFromMessage(nil)
|
||||
}
|
||||
|
||||
// ContentTypeFromMessage in case v is a google.api.HttpBody message it returns
|
||||
// its specified content type otherwise fall back to the default Marshaler.
|
||||
func (h *HTTPBodyMarshaler) ContentTypeFromMessage(v interface{}) string {
|
||||
if httpBody, ok := v.(*httpbody.HttpBody); ok {
|
||||
return httpBody.GetContentType()
|
||||
}
|
||||
return h.Marshaler.ContentType()
|
||||
}
|
||||
|
||||
// Marshal marshals "v" by returning the body bytes if v is a
|
||||
// google.api.HttpBody message, otherwise it falls back to the default Marshaler.
|
||||
func (h *HTTPBodyMarshaler) Marshal(v interface{}) ([]byte, error) {
|
||||
if httpBody, ok := v.(*httpbody.HttpBody); ok {
|
||||
return httpBody.Data, nil
|
||||
}
|
||||
return h.Marshaler.Marshal(v)
|
||||
}
|
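HTTPBodyMarshaler passes a google.api.HttpBody response through verbatim and falls back to its wrapped marshaler for anything else. A sketch, assuming the vendored httpbody and runtime packages are importable:

package main

import (
	"fmt"

	"github.com/grpc-ecosystem/grpc-gateway/runtime"
	"google.golang.org/genproto/googleapis/api/httpbody"
)

func main() {
	m := &runtime.HTTPBodyMarshaler{Marshaler: &runtime.JSONPb{OrigName: true}}

	// A google.api.HttpBody response is returned as its raw data bytes...
	raw, _ := m.Marshal(&httpbody.HttpBody{ContentType: "text/html", Data: []byte("<b>hi</b>")})
	fmt.Println(string(raw)) // <b>hi</b>

	// ...anything else falls back to the wrapped JSON marshaler.
	other, _ := m.Marshal(map[string]string{"status": "ok"})
	fmt.Println(string(other)) // {"status":"ok"}
}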
45 vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_json.go generated vendored
@@ -1,45 +0,0 @@
|
|||
package runtime
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"io"
|
||||
)
|
||||
|
||||
// JSONBuiltin is a Marshaler which marshals/unmarshals into/from JSON
|
||||
// with the standard "encoding/json" package of Golang.
|
||||
// Although it is generally faster for simple proto messages than JSONPb,
|
||||
// it does not support advanced features of protobuf, e.g. map, oneof, ....
|
||||
//
|
||||
// The NewEncoder and NewDecoder types return *json.Encoder and
|
||||
// *json.Decoder respectively.
|
||||
type JSONBuiltin struct{}
|
||||
|
||||
// ContentType always Returns "application/json".
|
||||
func (*JSONBuiltin) ContentType() string {
|
||||
return "application/json"
|
||||
}
|
||||
|
||||
// Marshal marshals "v" into JSON
|
||||
func (j *JSONBuiltin) Marshal(v interface{}) ([]byte, error) {
|
||||
return json.Marshal(v)
|
||||
}
|
||||
|
||||
// Unmarshal unmarshals JSON data into "v".
|
||||
func (j *JSONBuiltin) Unmarshal(data []byte, v interface{}) error {
|
||||
return json.Unmarshal(data, v)
|
||||
}
|
||||
|
||||
// NewDecoder returns a Decoder which reads JSON stream from "r".
|
||||
func (j *JSONBuiltin) NewDecoder(r io.Reader) Decoder {
|
||||
return json.NewDecoder(r)
|
||||
}
|
||||
|
||||
// NewEncoder returns an Encoder which writes JSON stream into "w".
|
||||
func (j *JSONBuiltin) NewEncoder(w io.Writer) Encoder {
|
||||
return json.NewEncoder(w)
|
||||
}
|
||||
|
||||
// Delimiter for newline encoded JSON streams.
|
||||
func (j *JSONBuiltin) Delimiter() []byte {
|
||||
return []byte("\n")
|
||||
}
|
262 vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_jsonpb.go generated vendored
@@ -1,262 +0,0 @@
|
|||
package runtime
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"reflect"
|
||||
|
||||
"github.com/golang/protobuf/jsonpb"
|
||||
"github.com/golang/protobuf/proto"
|
||||
)
|
||||
|
||||
// JSONPb is a Marshaler which marshals/unmarshals into/from JSON
|
||||
// with the "github.com/golang/protobuf/jsonpb".
|
||||
// It supports fully functionality of protobuf unlike JSONBuiltin.
|
||||
//
|
||||
// The NewDecoder method returns a DecoderWrapper, so the underlying
|
||||
// *json.Decoder methods can be used.
|
||||
type JSONPb jsonpb.Marshaler
|
||||
|
||||
// ContentType always returns "application/json".
|
||||
func (*JSONPb) ContentType() string {
|
||||
return "application/json"
|
||||
}
|
||||
|
||||
// Marshal marshals "v" into JSON.
|
||||
func (j *JSONPb) Marshal(v interface{}) ([]byte, error) {
|
||||
if _, ok := v.(proto.Message); !ok {
|
||||
return j.marshalNonProtoField(v)
|
||||
}
|
||||
|
||||
var buf bytes.Buffer
|
||||
if err := j.marshalTo(&buf, v); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return buf.Bytes(), nil
|
||||
}
|
||||
|
||||
func (j *JSONPb) marshalTo(w io.Writer, v interface{}) error {
|
||||
p, ok := v.(proto.Message)
|
||||
if !ok {
|
||||
buf, err := j.marshalNonProtoField(v)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = w.Write(buf)
|
||||
return err
|
||||
}
|
||||
return (*jsonpb.Marshaler)(j).Marshal(w, p)
|
||||
}
|
||||
|
||||
var (
|
||||
// protoMessageType is stored to prevent constant lookup of the same type at runtime.
|
||||
protoMessageType = reflect.TypeOf((*proto.Message)(nil)).Elem()
|
||||
)
|
||||
|
||||
// marshalNonProto marshals a non-message field of a protobuf message.
|
||||
// This function does not correctly marshals arbitrary data structure into JSON,
|
||||
// but it is only capable of marshaling non-message field values of protobuf,
|
||||
// i.e. primitive types, enums; pointers to primitives or enums; maps from
|
||||
// integer/string types to primitives/enums/pointers to messages.
|
||||
func (j *JSONPb) marshalNonProtoField(v interface{}) ([]byte, error) {
|
||||
if v == nil {
|
||||
return []byte("null"), nil
|
||||
}
|
||||
rv := reflect.ValueOf(v)
|
||||
for rv.Kind() == reflect.Ptr {
|
||||
if rv.IsNil() {
|
||||
return []byte("null"), nil
|
||||
}
|
||||
rv = rv.Elem()
|
||||
}
|
||||
|
||||
if rv.Kind() == reflect.Slice {
|
||||
if rv.IsNil() {
|
||||
if j.EmitDefaults {
|
||||
				return []byte("[]"), nil
			}
			return []byte("null"), nil
		}

		if rv.Type().Elem().Implements(protoMessageType) {
			var buf bytes.Buffer
			err := buf.WriteByte('[')
			if err != nil {
				return nil, err
			}
			for i := 0; i < rv.Len(); i++ {
				if i != 0 {
					err = buf.WriteByte(',')
					if err != nil {
						return nil, err
					}
				}
				if err = (*jsonpb.Marshaler)(j).Marshal(&buf, rv.Index(i).Interface().(proto.Message)); err != nil {
					return nil, err
				}
			}
			err = buf.WriteByte(']')
			if err != nil {
				return nil, err
			}

			return buf.Bytes(), nil
		}
	}

	if rv.Kind() == reflect.Map {
		m := make(map[string]*json.RawMessage)
		for _, k := range rv.MapKeys() {
			buf, err := j.Marshal(rv.MapIndex(k).Interface())
			if err != nil {
				return nil, err
			}
			m[fmt.Sprintf("%v", k.Interface())] = (*json.RawMessage)(&buf)
		}
		if j.Indent != "" {
			return json.MarshalIndent(m, "", j.Indent)
		}
		return json.Marshal(m)
	}
	if enum, ok := rv.Interface().(protoEnum); ok && !j.EnumsAsInts {
		return json.Marshal(enum.String())
	}
	return json.Marshal(rv.Interface())
}

// Unmarshal unmarshals JSON "data" into "v"
func (j *JSONPb) Unmarshal(data []byte, v interface{}) error {
	return unmarshalJSONPb(data, v)
}

// NewDecoder returns a Decoder which reads JSON stream from "r".
func (j *JSONPb) NewDecoder(r io.Reader) Decoder {
	d := json.NewDecoder(r)
	return DecoderWrapper{Decoder: d}
}

// DecoderWrapper is a wrapper around a *json.Decoder that adds
// support for protos to the Decode method.
type DecoderWrapper struct {
	*json.Decoder
}

// Decode wraps the embedded decoder's Decode method to support
// protos using a jsonpb.Unmarshaler.
func (d DecoderWrapper) Decode(v interface{}) error {
	return decodeJSONPb(d.Decoder, v)
}

// NewEncoder returns an Encoder which writes JSON stream into "w".
func (j *JSONPb) NewEncoder(w io.Writer) Encoder {
	return EncoderFunc(func(v interface{}) error {
		if err := j.marshalTo(w, v); err != nil {
			return err
		}
		// mimic json.Encoder by adding a newline (makes output
		// easier to read when it contains multiple encoded items)
		_, err := w.Write(j.Delimiter())
		return err
	})
}

func unmarshalJSONPb(data []byte, v interface{}) error {
	d := json.NewDecoder(bytes.NewReader(data))
	return decodeJSONPb(d, v)
}

func decodeJSONPb(d *json.Decoder, v interface{}) error {
	p, ok := v.(proto.Message)
	if !ok {
		return decodeNonProtoField(d, v)
	}
	unmarshaler := &jsonpb.Unmarshaler{AllowUnknownFields: allowUnknownFields}
	return unmarshaler.UnmarshalNext(d, p)
}

func decodeNonProtoField(d *json.Decoder, v interface{}) error {
	rv := reflect.ValueOf(v)
	if rv.Kind() != reflect.Ptr {
		return fmt.Errorf("%T is not a pointer", v)
	}
	for rv.Kind() == reflect.Ptr {
		if rv.IsNil() {
			rv.Set(reflect.New(rv.Type().Elem()))
		}
		if rv.Type().ConvertibleTo(typeProtoMessage) {
			unmarshaler := &jsonpb.Unmarshaler{AllowUnknownFields: allowUnknownFields}
			return unmarshaler.UnmarshalNext(d, rv.Interface().(proto.Message))
		}
		rv = rv.Elem()
	}
	if rv.Kind() == reflect.Map {
		if rv.IsNil() {
			rv.Set(reflect.MakeMap(rv.Type()))
		}
		conv, ok := convFromType[rv.Type().Key().Kind()]
		if !ok {
			return fmt.Errorf("unsupported type of map field key: %v", rv.Type().Key())
		}

		m := make(map[string]*json.RawMessage)
		if err := d.Decode(&m); err != nil {
			return err
		}
		for k, v := range m {
			result := conv.Call([]reflect.Value{reflect.ValueOf(k)})
			if err := result[1].Interface(); err != nil {
				return err.(error)
			}
			bk := result[0]
			bv := reflect.New(rv.Type().Elem())
			if err := unmarshalJSONPb([]byte(*v), bv.Interface()); err != nil {
				return err
			}
			rv.SetMapIndex(bk, bv.Elem())
		}
		return nil
	}
	if _, ok := rv.Interface().(protoEnum); ok {
		var repr interface{}
		if err := d.Decode(&repr); err != nil {
			return err
		}
		switch repr.(type) {
		case string:
			// TODO(yugui) Should use proto.StructProperties?
			return fmt.Errorf("unmarshaling of symbolic enum %q not supported: %T", repr, rv.Interface())
		case float64:
			rv.Set(reflect.ValueOf(int32(repr.(float64))).Convert(rv.Type()))
			return nil
		default:
			return fmt.Errorf("cannot assign %#v into Go type %T", repr, rv.Interface())
		}
	}
	return d.Decode(v)
}

type protoEnum interface {
	fmt.Stringer
	EnumDescriptor() ([]byte, []int)
}

var typeProtoMessage = reflect.TypeOf((*proto.Message)(nil)).Elem()

// Delimiter for newline encoded JSON streams.
func (j *JSONPb) Delimiter() []byte {
	return []byte("\n")
}

// allowUnknownFields helps not to return an error when the destination
// is a struct and the input contains object keys which do not match any
// non-ignored, exported fields in the destination.
var allowUnknownFields = true

// DisallowUnknownFields enables option in decoder (unmarshaller) to
// return an error when it finds an unknown field. This function must be
// called before using the JSON marshaller.
func DisallowUnknownFields() {
	allowUnknownFields = false
}
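Not part of this commit, but for orientation: a minimal sketch of how the removed JSONPb marshaler was typically driven, assuming the v1 grpc-gateway import path used in this vendor tree. The encoded values and field names below are illustrative only.

package main

import (
	"bytes"
	"fmt"

	"github.com/grpc-ecosystem/grpc-gateway/runtime"
)

func main() {
	// Each Encode call appends j.Delimiter() ("\n"), mirroring the
	// NewEncoder implementation in the file above.
	j := &runtime.JSONPb{OrigName: true}
	var buf bytes.Buffer
	enc := j.NewEncoder(&buf)
	_ = enc.Encode(map[string]string{"job": "prometheus"})
	_ = enc.Encode(map[string]string{"job": "node"})
	fmt.Print(buf.String()) // two JSON objects, one per line

	// Opt into strict decoding; must be called before unmarshalling,
	// since it flips the package-level allowUnknownFields flag.
	runtime.DisallowUnknownFields()
}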
vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_proto.go (generated, vendored): 62 lines deleted
@@ -1,62 +0,0 @@
package runtime

import (
	"io"

	"errors"
	"github.com/golang/protobuf/proto"
	"io/ioutil"
)

// ProtoMarshaller is a Marshaller which marshals/unmarshals into/from serialize proto bytes
type ProtoMarshaller struct{}

// ContentType always returns "application/octet-stream".
func (*ProtoMarshaller) ContentType() string {
	return "application/octet-stream"
}

// Marshal marshals "value" into Proto
func (*ProtoMarshaller) Marshal(value interface{}) ([]byte, error) {
	message, ok := value.(proto.Message)
	if !ok {
		return nil, errors.New("unable to marshal non proto field")
	}
	return proto.Marshal(message)
}

// Unmarshal unmarshals proto "data" into "value"
func (*ProtoMarshaller) Unmarshal(data []byte, value interface{}) error {
	message, ok := value.(proto.Message)
	if !ok {
		return errors.New("unable to unmarshal non proto field")
	}
	return proto.Unmarshal(data, message)
}

// NewDecoder returns a Decoder which reads proto stream from "reader".
func (marshaller *ProtoMarshaller) NewDecoder(reader io.Reader) Decoder {
	return DecoderFunc(func(value interface{}) error {
		buffer, err := ioutil.ReadAll(reader)
		if err != nil {
			return err
		}
		return marshaller.Unmarshal(buffer, value)
	})
}

// NewEncoder returns an Encoder which writes proto stream into "writer".
func (marshaller *ProtoMarshaller) NewEncoder(writer io.Writer) Encoder {
	return EncoderFunc(func(value interface{}) error {
		buffer, err := marshaller.Marshal(value)
		if err != nil {
			return err
		}
		_, err = writer.Write(buffer)
		if err != nil {
			return err
		}

		return nil
	})
}
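For reference, a minimal round-trip sketch with the ProtoMarshaller removed above; the any.Any message, the type URL, and the payload bytes are placeholders, not values used anywhere in this repository.

package main

import (
	"fmt"

	"github.com/golang/protobuf/ptypes/any"
	"github.com/grpc-ecosystem/grpc-gateway/runtime"
)

func main() {
	m := &runtime.ProtoMarshaller{}

	// Marshal/Unmarshal only accept proto.Message values; anything else errors out.
	in := &any.Any{TypeUrl: "example.com/hypothetical.Type", Value: []byte{0x08, 0x01}}
	raw, err := m.Marshal(in)
	if err != nil {
		panic(err)
	}

	out := &any.Any{}
	if err := m.Unmarshal(raw, out); err != nil {
		panic(err)
	}
	fmt.Println(out.TypeUrl, m.ContentType()) // example.com/hypothetical.Type application/octet-stream
}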
vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler.go (generated, vendored): 55 lines deleted
@@ -1,55 +0,0 @@
package runtime

import (
	"io"
)

// Marshaler defines a conversion between byte sequence and gRPC payloads / fields.
type Marshaler interface {
	// Marshal marshals "v" into byte sequence.
	Marshal(v interface{}) ([]byte, error)
	// Unmarshal unmarshals "data" into "v".
	// "v" must be a pointer value.
	Unmarshal(data []byte, v interface{}) error
	// NewDecoder returns a Decoder which reads byte sequence from "r".
	NewDecoder(r io.Reader) Decoder
	// NewEncoder returns an Encoder which writes bytes sequence into "w".
	NewEncoder(w io.Writer) Encoder
	// ContentType returns the Content-Type which this marshaler is responsible for.
	ContentType() string
}

// Marshalers that implement contentTypeMarshaler will have their ContentTypeFromMessage method called
// to set the Content-Type header on the response
type contentTypeMarshaler interface {
	// ContentTypeFromMessage returns the Content-Type this marshaler produces from the provided message
	ContentTypeFromMessage(v interface{}) string
}

// Decoder decodes a byte sequence
type Decoder interface {
	Decode(v interface{}) error
}

// Encoder encodes gRPC payloads / fields into byte sequence.
type Encoder interface {
	Encode(v interface{}) error
}

// DecoderFunc adapts an decoder function into Decoder.
type DecoderFunc func(v interface{}) error

// Decode delegates invocations to the underlying function itself.
func (f DecoderFunc) Decode(v interface{}) error { return f(v) }

// EncoderFunc adapts an encoder function into Encoder
type EncoderFunc func(v interface{}) error

// Encode delegates invocations to the underlying function itself.
func (f EncoderFunc) Encode(v interface{}) error { return f(v) }

// Delimited defines the streaming delimiter.
type Delimited interface {
	// Delimiter returns the record separator for the stream.
	Delimiter() []byte
}
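A short sketch, not part of this diff, of the adapter pattern defined above: EncoderFunc and DecoderFunc let a plain closure satisfy the Encoder/Decoder interfaces, which is exactly how ProtoMarshaller and JSONPb build their encoders. The JSON-to-stdout encoder here is just an example.

package main

import (
	"encoding/json"
	"fmt"
	"os"

	"github.com/grpc-ecosystem/grpc-gateway/runtime"
)

func main() {
	// Wrap a closure so it can be used wherever a runtime.Encoder is expected.
	var enc runtime.Encoder = runtime.EncoderFunc(func(v interface{}) error {
		return json.NewEncoder(os.Stdout).Encode(v)
	})
	if err := enc.Encode(map[string]int{"up": 1}); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}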
vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler_registry.go (generated, vendored): 91 lines deleted
@@ -1,91 +0,0 @@
package runtime

import (
	"errors"
	"net/http"
)

// MIMEWildcard is the fallback MIME type used for requests which do not match
// a registered MIME type.
const MIMEWildcard = "*"

var (
	acceptHeader      = http.CanonicalHeaderKey("Accept")
	contentTypeHeader = http.CanonicalHeaderKey("Content-Type")

	defaultMarshaler = &JSONPb{OrigName: true}
)

// MarshalerForRequest returns the inbound/outbound marshalers for this request.
// It checks the registry on the ServeMux for the MIME type set by the Content-Type header.
// If it isn't set (or the request Content-Type is empty), checks for "*".
// If there are multiple Content-Type headers set, choose the first one that it can
// exactly match in the registry.
// Otherwise, it follows the above logic for "*"/InboundMarshaler/OutboundMarshaler.
func MarshalerForRequest(mux *ServeMux, r *http.Request) (inbound Marshaler, outbound Marshaler) {
	for _, acceptVal := range r.Header[acceptHeader] {
		if m, ok := mux.marshalers.mimeMap[acceptVal]; ok {
			outbound = m
			break
		}
	}

	for _, contentTypeVal := range r.Header[contentTypeHeader] {
		if m, ok := mux.marshalers.mimeMap[contentTypeVal]; ok {
			inbound = m
			break
		}
	}

	if inbound == nil {
		inbound = mux.marshalers.mimeMap[MIMEWildcard]
	}
	if outbound == nil {
		outbound = inbound
	}

	return inbound, outbound
}

// marshalerRegistry is a mapping from MIME types to Marshalers.
type marshalerRegistry struct {
	mimeMap map[string]Marshaler
}

// add adds a marshaler for a case-sensitive MIME type string ("*" to match any
// MIME type).
func (m marshalerRegistry) add(mime string, marshaler Marshaler) error {
	if len(mime) == 0 {
		return errors.New("empty MIME type")
	}

	m.mimeMap[mime] = marshaler

	return nil
}

// makeMarshalerMIMERegistry returns a new registry of marshalers.
// It allows for a mapping of case-sensitive Content-Type MIME type string to runtime.Marshaler interfaces.
//
// For example, you could allow the client to specify the use of the runtime.JSONPb marshaler
// with a "application/jsonpb" Content-Type and the use of the runtime.JSONBuiltin marshaler
// with a "application/json" Content-Type.
// "*" can be used to match any Content-Type.
// This can be attached to a ServerMux with the marshaler option.
func makeMarshalerMIMERegistry() marshalerRegistry {
	return marshalerRegistry{
		mimeMap: map[string]Marshaler{
			MIMEWildcard: defaultMarshaler,
		},
	}
}

// WithMarshalerOption returns a ServeMuxOption which associates inbound and outbound
// Marshalers to a MIME type in mux.
func WithMarshalerOption(mime string, marshaler Marshaler) ServeMuxOption {
	return func(mux *ServeMux) {
		if err := mux.marshalers.add(mime, marshaler); err != nil {
			panic(err)
		}
	}
}
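As a sketch of the registry above (not code from this commit): WithMarshalerOption binds a Marshaler to a Content-Type at mux construction time, while the "*" wildcard keeps the JSONPb default installed by makeMarshalerMIMERegistry. The MIME strings chosen here are illustrative.

package main

import (
	"github.com/grpc-ecosystem/grpc-gateway/runtime"
)

func main() {
	// Serve raw proto for clients that ask for octet-stream, JSON for everyone else.
	mux := runtime.NewServeMux(
		runtime.WithMarshalerOption("application/octet-stream", &runtime.ProtoMarshaller{}),
		runtime.WithMarshalerOption(runtime.MIMEWildcard, &runtime.JSONPb{OrigName: true}),
	)
	_ = mux // handlers would normally be registered by generated gateway code
}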
vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/mux.go (generated, vendored): 300 lines deleted
@@ -1,300 +0,0 @@
|
|||
package runtime
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/textproto"
|
||||
"strings"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/metadata"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
// A HandlerFunc handles a specific pair of path pattern and HTTP method.
|
||||
type HandlerFunc func(w http.ResponseWriter, r *http.Request, pathParams map[string]string)
|
||||
|
||||
// ErrUnknownURI is the error supplied to a custom ProtoErrorHandlerFunc when
|
||||
// a request is received with a URI path that does not match any registered
|
||||
// service method.
|
||||
//
|
||||
// Since gRPC servers return an "Unimplemented" code for requests with an
|
||||
// unrecognized URI path, this error also has a gRPC "Unimplemented" code.
|
||||
var ErrUnknownURI = status.Error(codes.Unimplemented, http.StatusText(http.StatusNotImplemented))
|
||||
|
||||
// ServeMux is a request multiplexer for grpc-gateway.
|
||||
// It matches http requests to patterns and invokes the corresponding handler.
|
||||
type ServeMux struct {
|
||||
// handlers maps HTTP method to a list of handlers.
|
||||
handlers map[string][]handler
|
||||
forwardResponseOptions []func(context.Context, http.ResponseWriter, proto.Message) error
|
||||
marshalers marshalerRegistry
|
||||
incomingHeaderMatcher HeaderMatcherFunc
|
||||
outgoingHeaderMatcher HeaderMatcherFunc
|
||||
metadataAnnotators []func(context.Context, *http.Request) metadata.MD
|
||||
streamErrorHandler StreamErrorHandlerFunc
|
||||
protoErrorHandler ProtoErrorHandlerFunc
|
||||
disablePathLengthFallback bool
|
||||
lastMatchWins bool
|
||||
}
|
||||
|
||||
// ServeMuxOption is an option that can be given to a ServeMux on construction.
|
||||
type ServeMuxOption func(*ServeMux)
|
||||
|
||||
// WithForwardResponseOption returns a ServeMuxOption representing the forwardResponseOption.
|
||||
//
|
||||
// forwardResponseOption is an option that will be called on the relevant context.Context,
|
||||
// http.ResponseWriter, and proto.Message before every forwarded response.
|
||||
//
|
||||
// The message may be nil in the case where just a header is being sent.
|
||||
func WithForwardResponseOption(forwardResponseOption func(context.Context, http.ResponseWriter, proto.Message) error) ServeMuxOption {
|
||||
return func(serveMux *ServeMux) {
|
||||
serveMux.forwardResponseOptions = append(serveMux.forwardResponseOptions, forwardResponseOption)
|
||||
}
|
||||
}
|
||||
|
||||
// SetQueryParameterParser sets the query parameter parser, used to populate message from query parameters.
|
||||
// Configuring this will mean the generated swagger output is no longer correct, and it should be
|
||||
// done with careful consideration.
|
||||
func SetQueryParameterParser(queryParameterParser QueryParameterParser) ServeMuxOption {
|
||||
return func(serveMux *ServeMux) {
|
||||
currentQueryParser = queryParameterParser
|
||||
}
|
||||
}
|
||||
|
||||
// HeaderMatcherFunc checks whether a header key should be forwarded to/from gRPC context.
|
||||
type HeaderMatcherFunc func(string) (string, bool)
|
||||
|
||||
// DefaultHeaderMatcher is used to pass http request headers to/from gRPC context. This adds permanent HTTP header
|
||||
// keys (as specified by the IANA) to gRPC context with grpcgateway- prefix. HTTP headers that start with
|
||||
// 'Grpc-Metadata-' are mapped to gRPC metadata after removing prefix 'Grpc-Metadata-'.
|
||||
func DefaultHeaderMatcher(key string) (string, bool) {
|
||||
key = textproto.CanonicalMIMEHeaderKey(key)
|
||||
if isPermanentHTTPHeader(key) {
|
||||
return MetadataPrefix + key, true
|
||||
} else if strings.HasPrefix(key, MetadataHeaderPrefix) {
|
||||
return key[len(MetadataHeaderPrefix):], true
|
||||
}
|
||||
return "", false
|
||||
}
|
||||
|
||||
// WithIncomingHeaderMatcher returns a ServeMuxOption representing a headerMatcher for incoming request to gateway.
|
||||
//
|
||||
// This matcher will be called with each header in http.Request. If matcher returns true, that header will be
|
||||
// passed to gRPC context. To transform the header before passing to gRPC context, matcher should return modified header.
|
||||
func WithIncomingHeaderMatcher(fn HeaderMatcherFunc) ServeMuxOption {
|
||||
return func(mux *ServeMux) {
|
||||
mux.incomingHeaderMatcher = fn
|
||||
}
|
||||
}
|
||||
|
||||
// WithOutgoingHeaderMatcher returns a ServeMuxOption representing a headerMatcher for outgoing response from gateway.
|
||||
//
|
||||
// This matcher will be called with each header in response header metadata. If matcher returns true, that header will be
|
||||
// passed to http response returned from gateway. To transform the header before passing to response,
|
||||
// matcher should return modified header.
|
||||
func WithOutgoingHeaderMatcher(fn HeaderMatcherFunc) ServeMuxOption {
|
||||
return func(mux *ServeMux) {
|
||||
mux.outgoingHeaderMatcher = fn
|
||||
}
|
||||
}
|
||||
|
||||
// WithMetadata returns a ServeMuxOption for passing metadata to a gRPC context.
|
||||
//
|
||||
// This can be used by services that need to read from http.Request and modify gRPC context. A common use case
|
||||
// is reading token from cookie and adding it in gRPC context.
|
||||
func WithMetadata(annotator func(context.Context, *http.Request) metadata.MD) ServeMuxOption {
|
||||
return func(serveMux *ServeMux) {
|
||||
serveMux.metadataAnnotators = append(serveMux.metadataAnnotators, annotator)
|
||||
}
|
||||
}
|
||||
|
||||
// WithProtoErrorHandler returns a ServeMuxOption for configuring a custom error handler.
|
||||
//
|
||||
// This can be used to handle an error as general proto message defined by gRPC.
|
||||
// When this option is used, the mux uses the configured error handler instead of HTTPError and
|
||||
// OtherErrorHandler.
|
||||
func WithProtoErrorHandler(fn ProtoErrorHandlerFunc) ServeMuxOption {
|
||||
return func(serveMux *ServeMux) {
|
||||
serveMux.protoErrorHandler = fn
|
||||
}
|
||||
}
|
||||
|
||||
// WithDisablePathLengthFallback returns a ServeMuxOption for disable path length fallback.
|
||||
func WithDisablePathLengthFallback() ServeMuxOption {
|
||||
return func(serveMux *ServeMux) {
|
||||
serveMux.disablePathLengthFallback = true
|
||||
}
|
||||
}
|
||||
|
||||
// WithStreamErrorHandler returns a ServeMuxOption that will use the given custom stream
|
||||
// error handler, which allows for customizing the error trailer for server-streaming
|
||||
// calls.
|
||||
//
|
||||
// For stream errors that occur before any response has been written, the mux's
|
||||
// ProtoErrorHandler will be invoked. However, once data has been written, the errors must
|
||||
// be handled differently: they must be included in the response body. The response body's
|
||||
// final message will include the error details returned by the stream error handler.
|
||||
func WithStreamErrorHandler(fn StreamErrorHandlerFunc) ServeMuxOption {
|
||||
return func(serveMux *ServeMux) {
|
||||
serveMux.streamErrorHandler = fn
|
||||
}
|
||||
}
|
||||
|
||||
// WithLastMatchWins returns a ServeMuxOption that will enable "last
|
||||
// match wins" behavior, where if multiple path patterns match a
|
||||
// request path, the last one defined in the .proto file will be used.
|
||||
func WithLastMatchWins() ServeMuxOption {
|
||||
return func(serveMux *ServeMux) {
|
||||
serveMux.lastMatchWins = true
|
||||
}
|
||||
}
|
||||
|
||||
// NewServeMux returns a new ServeMux whose internal mapping is empty.
|
||||
func NewServeMux(opts ...ServeMuxOption) *ServeMux {
|
||||
serveMux := &ServeMux{
|
||||
handlers: make(map[string][]handler),
|
||||
forwardResponseOptions: make([]func(context.Context, http.ResponseWriter, proto.Message) error, 0),
|
||||
marshalers: makeMarshalerMIMERegistry(),
|
||||
streamErrorHandler: DefaultHTTPStreamErrorHandler,
|
||||
}
|
||||
|
||||
for _, opt := range opts {
|
||||
opt(serveMux)
|
||||
}
|
||||
|
||||
if serveMux.incomingHeaderMatcher == nil {
|
||||
serveMux.incomingHeaderMatcher = DefaultHeaderMatcher
|
||||
}
|
||||
|
||||
if serveMux.outgoingHeaderMatcher == nil {
|
||||
serveMux.outgoingHeaderMatcher = func(key string) (string, bool) {
|
||||
return fmt.Sprintf("%s%s", MetadataHeaderPrefix, key), true
|
||||
}
|
||||
}
|
||||
|
||||
return serveMux
|
||||
}
|
||||
|
||||
// Handle associates "h" to the pair of HTTP method and path pattern.
|
||||
func (s *ServeMux) Handle(meth string, pat Pattern, h HandlerFunc) {
|
||||
if s.lastMatchWins {
|
||||
s.handlers[meth] = append([]handler{handler{pat: pat, h: h}}, s.handlers[meth]...)
|
||||
} else {
|
||||
s.handlers[meth] = append(s.handlers[meth], handler{pat: pat, h: h})
|
||||
}
|
||||
}
|
||||
|
||||
// ServeHTTP dispatches the request to the first handler whose pattern matches to r.Method and r.Path.
|
||||
func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
|
||||
path := r.URL.Path
|
||||
if !strings.HasPrefix(path, "/") {
|
||||
if s.protoErrorHandler != nil {
|
||||
_, outboundMarshaler := MarshalerForRequest(s, r)
|
||||
sterr := status.Error(codes.InvalidArgument, http.StatusText(http.StatusBadRequest))
|
||||
s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr)
|
||||
} else {
|
||||
OtherErrorHandler(w, r, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
components := strings.Split(path[1:], "/")
|
||||
l := len(components)
|
||||
var verb string
|
||||
if idx := strings.LastIndex(components[l-1], ":"); idx == 0 {
|
||||
if s.protoErrorHandler != nil {
|
||||
_, outboundMarshaler := MarshalerForRequest(s, r)
|
||||
s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, ErrUnknownURI)
|
||||
} else {
|
||||
OtherErrorHandler(w, r, http.StatusText(http.StatusNotFound), http.StatusNotFound)
|
||||
}
|
||||
return
|
||||
} else if idx > 0 {
|
||||
c := components[l-1]
|
||||
components[l-1], verb = c[:idx], c[idx+1:]
|
||||
}
|
||||
|
||||
if override := r.Header.Get("X-HTTP-Method-Override"); override != "" && s.isPathLengthFallback(r) {
|
||||
r.Method = strings.ToUpper(override)
|
||||
if err := r.ParseForm(); err != nil {
|
||||
if s.protoErrorHandler != nil {
|
||||
_, outboundMarshaler := MarshalerForRequest(s, r)
|
||||
sterr := status.Error(codes.InvalidArgument, err.Error())
|
||||
s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr)
|
||||
} else {
|
||||
OtherErrorHandler(w, r, err.Error(), http.StatusBadRequest)
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
for _, h := range s.handlers[r.Method] {
|
||||
pathParams, err := h.pat.Match(components, verb)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
h.h(w, r, pathParams)
|
||||
return
|
||||
}
|
||||
|
||||
// lookup other methods to handle fallback from GET to POST and
|
||||
// to determine if it is MethodNotAllowed or NotFound.
|
||||
for m, handlers := range s.handlers {
|
||||
if m == r.Method {
|
||||
continue
|
||||
}
|
||||
for _, h := range handlers {
|
||||
pathParams, err := h.pat.Match(components, verb)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
// X-HTTP-Method-Override is optional. Always allow fallback to POST.
|
||||
if s.isPathLengthFallback(r) {
|
||||
if err := r.ParseForm(); err != nil {
|
||||
if s.protoErrorHandler != nil {
|
||||
_, outboundMarshaler := MarshalerForRequest(s, r)
|
||||
sterr := status.Error(codes.InvalidArgument, err.Error())
|
||||
s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr)
|
||||
} else {
|
||||
OtherErrorHandler(w, r, err.Error(), http.StatusBadRequest)
|
||||
}
|
||||
return
|
||||
}
|
||||
h.h(w, r, pathParams)
|
||||
return
|
||||
}
|
||||
if s.protoErrorHandler != nil {
|
||||
_, outboundMarshaler := MarshalerForRequest(s, r)
|
||||
s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, ErrUnknownURI)
|
||||
} else {
|
||||
OtherErrorHandler(w, r, http.StatusText(http.StatusMethodNotAllowed), http.StatusMethodNotAllowed)
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if s.protoErrorHandler != nil {
|
||||
_, outboundMarshaler := MarshalerForRequest(s, r)
|
||||
s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, ErrUnknownURI)
|
||||
} else {
|
||||
OtherErrorHandler(w, r, http.StatusText(http.StatusNotFound), http.StatusNotFound)
|
||||
}
|
||||
}
|
||||
|
||||
// GetForwardResponseOptions returns the ForwardResponseOptions associated with this ServeMux.
|
||||
func (s *ServeMux) GetForwardResponseOptions() []func(context.Context, http.ResponseWriter, proto.Message) error {
|
||||
return s.forwardResponseOptions
|
||||
}
|
||||
|
||||
func (s *ServeMux) isPathLengthFallback(r *http.Request) bool {
|
||||
return !s.disablePathLengthFallback && r.Method == "POST" && r.Header.Get("Content-Type") == "application/x-www-form-urlencoded"
|
||||
}
|
||||
|
||||
type handler struct {
|
||||
pat Pattern
|
||||
h HandlerFunc
|
||||
}
|
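A small sketch, not part of this diff, of the DefaultHeaderMatcher behavior documented in mux.go above: permanent IANA HTTP headers are forwarded with the grpcgateway- prefix, Grpc-Metadata-* headers are forwarded with that prefix stripped, and everything else is dropped. The header names below are examples.

package main

import (
	"fmt"

	"github.com/grpc-ecosystem/grpc-gateway/runtime"
)

func main() {
	for _, h := range []string{"Authorization", "Grpc-Metadata-Trace-Id", "X-Totally-Custom"} {
		key, ok := runtime.DefaultHeaderMatcher(h)
		fmt.Printf("%-25s -> %q %v\n", h, key, ok)
	}
}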
vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/pattern.go (generated, vendored): 262 lines deleted
@@ -1,262 +0,0 @@
|
|||
package runtime
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/grpc-ecosystem/grpc-gateway/utilities"
|
||||
"google.golang.org/grpc/grpclog"
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrNotMatch indicates that the given HTTP request path does not match to the pattern.
|
||||
ErrNotMatch = errors.New("not match to the path pattern")
|
||||
// ErrInvalidPattern indicates that the given definition of Pattern is not valid.
|
||||
ErrInvalidPattern = errors.New("invalid pattern")
|
||||
)
|
||||
|
||||
type op struct {
|
||||
code utilities.OpCode
|
||||
operand int
|
||||
}
|
||||
|
||||
// Pattern is a template pattern of http request paths defined in github.com/googleapis/googleapis/google/api/http.proto.
|
||||
type Pattern struct {
|
||||
// ops is a list of operations
|
||||
ops []op
|
||||
// pool is a constant pool indexed by the operands or vars.
|
||||
pool []string
|
||||
// vars is a list of variables names to be bound by this pattern
|
||||
vars []string
|
||||
// stacksize is the max depth of the stack
|
||||
stacksize int
|
||||
// tailLen is the length of the fixed-size segments after a deep wildcard
|
||||
tailLen int
|
||||
// verb is the VERB part of the path pattern. It is empty if the pattern does not have VERB part.
|
||||
verb string
|
||||
// assumeColonVerb indicates whether a path suffix after a final
|
||||
// colon may only be interpreted as a verb.
|
||||
assumeColonVerb bool
|
||||
}
|
||||
|
||||
type patternOptions struct {
|
||||
assumeColonVerb bool
|
||||
}
|
||||
|
||||
// PatternOpt is an option for creating Patterns.
|
||||
type PatternOpt func(*patternOptions)
|
||||
|
||||
// NewPattern returns a new Pattern from the given definition values.
|
||||
// "ops" is a sequence of op codes. "pool" is a constant pool.
|
||||
// "verb" is the verb part of the pattern. It is empty if the pattern does not have the part.
|
||||
// "version" must be 1 for now.
|
||||
// It returns an error if the given definition is invalid.
|
||||
func NewPattern(version int, ops []int, pool []string, verb string, opts ...PatternOpt) (Pattern, error) {
|
||||
options := patternOptions{
|
||||
assumeColonVerb: true,
|
||||
}
|
||||
for _, o := range opts {
|
||||
o(&options)
|
||||
}
|
||||
|
||||
if version != 1 {
|
||||
grpclog.Infof("unsupported version: %d", version)
|
||||
return Pattern{}, ErrInvalidPattern
|
||||
}
|
||||
|
||||
l := len(ops)
|
||||
if l%2 != 0 {
|
||||
grpclog.Infof("odd number of ops codes: %d", l)
|
||||
return Pattern{}, ErrInvalidPattern
|
||||
}
|
||||
|
||||
var (
|
||||
typedOps []op
|
||||
stack, maxstack int
|
||||
tailLen int
|
||||
pushMSeen bool
|
||||
vars []string
|
||||
)
|
||||
for i := 0; i < l; i += 2 {
|
||||
op := op{code: utilities.OpCode(ops[i]), operand: ops[i+1]}
|
||||
switch op.code {
|
||||
case utilities.OpNop:
|
||||
continue
|
||||
case utilities.OpPush:
|
||||
if pushMSeen {
|
||||
tailLen++
|
||||
}
|
||||
stack++
|
||||
case utilities.OpPushM:
|
||||
if pushMSeen {
|
||||
grpclog.Infof("pushM appears twice")
|
||||
return Pattern{}, ErrInvalidPattern
|
||||
}
|
||||
pushMSeen = true
|
||||
stack++
|
||||
case utilities.OpLitPush:
|
||||
if op.operand < 0 || len(pool) <= op.operand {
|
||||
grpclog.Infof("negative literal index: %d", op.operand)
|
||||
return Pattern{}, ErrInvalidPattern
|
||||
}
|
||||
if pushMSeen {
|
||||
tailLen++
|
||||
}
|
||||
stack++
|
||||
case utilities.OpConcatN:
|
||||
if op.operand <= 0 {
|
||||
grpclog.Infof("negative concat size: %d", op.operand)
|
||||
return Pattern{}, ErrInvalidPattern
|
||||
}
|
||||
stack -= op.operand
|
||||
if stack < 0 {
|
||||
grpclog.Print("stack underflow")
|
||||
return Pattern{}, ErrInvalidPattern
|
||||
}
|
||||
stack++
|
||||
case utilities.OpCapture:
|
||||
if op.operand < 0 || len(pool) <= op.operand {
|
||||
grpclog.Infof("variable name index out of bound: %d", op.operand)
|
||||
return Pattern{}, ErrInvalidPattern
|
||||
}
|
||||
v := pool[op.operand]
|
||||
op.operand = len(vars)
|
||||
vars = append(vars, v)
|
||||
stack--
|
||||
if stack < 0 {
|
||||
grpclog.Infof("stack underflow")
|
||||
return Pattern{}, ErrInvalidPattern
|
||||
}
|
||||
default:
|
||||
grpclog.Infof("invalid opcode: %d", op.code)
|
||||
return Pattern{}, ErrInvalidPattern
|
||||
}
|
||||
|
||||
if maxstack < stack {
|
||||
maxstack = stack
|
||||
}
|
||||
typedOps = append(typedOps, op)
|
||||
}
|
||||
return Pattern{
|
||||
ops: typedOps,
|
||||
pool: pool,
|
||||
vars: vars,
|
||||
stacksize: maxstack,
|
||||
tailLen: tailLen,
|
||||
verb: verb,
|
||||
assumeColonVerb: options.assumeColonVerb,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// MustPattern is a helper function which makes it easier to call NewPattern in variable initialization.
|
||||
func MustPattern(p Pattern, err error) Pattern {
|
||||
if err != nil {
|
||||
grpclog.Fatalf("Pattern initialization failed: %v", err)
|
||||
}
|
||||
return p
|
||||
}
|
||||
|
||||
// Match examines components if it matches to the Pattern.
|
||||
// If it matches, the function returns a mapping from field paths to their captured values.
|
||||
// If otherwise, the function returns an error.
|
||||
func (p Pattern) Match(components []string, verb string) (map[string]string, error) {
|
||||
if p.verb != verb {
|
||||
if p.assumeColonVerb || p.verb != "" {
|
||||
return nil, ErrNotMatch
|
||||
}
|
||||
if len(components) == 0 {
|
||||
components = []string{":" + verb}
|
||||
} else {
|
||||
components = append([]string{}, components...)
|
||||
components[len(components)-1] += ":" + verb
|
||||
}
|
||||
verb = ""
|
||||
}
|
||||
|
||||
var pos int
|
||||
stack := make([]string, 0, p.stacksize)
|
||||
captured := make([]string, len(p.vars))
|
||||
l := len(components)
|
||||
for _, op := range p.ops {
|
||||
switch op.code {
|
||||
case utilities.OpNop:
|
||||
continue
|
||||
case utilities.OpPush, utilities.OpLitPush:
|
||||
if pos >= l {
|
||||
return nil, ErrNotMatch
|
||||
}
|
||||
c := components[pos]
|
||||
if op.code == utilities.OpLitPush {
|
||||
if lit := p.pool[op.operand]; c != lit {
|
||||
return nil, ErrNotMatch
|
||||
}
|
||||
}
|
||||
stack = append(stack, c)
|
||||
pos++
|
||||
case utilities.OpPushM:
|
||||
end := len(components)
|
||||
if end < pos+p.tailLen {
|
||||
return nil, ErrNotMatch
|
||||
}
|
||||
end -= p.tailLen
|
||||
stack = append(stack, strings.Join(components[pos:end], "/"))
|
||||
pos = end
|
||||
case utilities.OpConcatN:
|
||||
n := op.operand
|
||||
l := len(stack) - n
|
||||
stack = append(stack[:l], strings.Join(stack[l:], "/"))
|
||||
case utilities.OpCapture:
|
||||
n := len(stack) - 1
|
||||
captured[op.operand] = stack[n]
|
||||
stack = stack[:n]
|
||||
}
|
||||
}
|
||||
if pos < l {
|
||||
return nil, ErrNotMatch
|
||||
}
|
||||
bindings := make(map[string]string)
|
||||
for i, val := range captured {
|
||||
bindings[p.vars[i]] = val
|
||||
}
|
||||
return bindings, nil
|
||||
}
|
||||
|
||||
// Verb returns the verb part of the Pattern.
|
||||
func (p Pattern) Verb() string { return p.verb }
|
||||
|
||||
func (p Pattern) String() string {
|
||||
var stack []string
|
||||
for _, op := range p.ops {
|
||||
switch op.code {
|
||||
case utilities.OpNop:
|
||||
continue
|
||||
case utilities.OpPush:
|
||||
stack = append(stack, "*")
|
||||
case utilities.OpLitPush:
|
||||
stack = append(stack, p.pool[op.operand])
|
||||
case utilities.OpPushM:
|
||||
stack = append(stack, "**")
|
||||
case utilities.OpConcatN:
|
||||
n := op.operand
|
||||
l := len(stack) - n
|
||||
stack = append(stack[:l], strings.Join(stack[l:], "/"))
|
||||
case utilities.OpCapture:
|
||||
n := len(stack) - 1
|
||||
stack[n] = fmt.Sprintf("{%s=%s}", p.vars[op.operand], stack[n])
|
||||
}
|
||||
}
|
||||
segs := strings.Join(stack, "/")
|
||||
if p.verb != "" {
|
||||
return fmt.Sprintf("/%s:%s", segs, p.verb)
|
||||
}
|
||||
return "/" + segs
|
||||
}
|
||||
|
||||
// AssumeColonVerbOpt indicates whether a path suffix after a final
|
||||
// colon may only be interpreted as a verb.
|
||||
func AssumeColonVerbOpt(val bool) PatternOpt {
|
||||
return PatternOpt(func(o *patternOptions) {
|
||||
o.assumeColonVerb = val
|
||||
})
|
||||
}
|
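To make the opcode machinery in pattern.go above concrete, here is a hand-built pattern roughly equivalent to the HTTP rule "/v1/{name}"; in practice these op sequences are emitted by the gateway code generator, and the pool values below are made up.

package main

import (
	"fmt"

	"github.com/grpc-ecosystem/grpc-gateway/runtime"
	"github.com/grpc-ecosystem/grpc-gateway/utilities"
)

func main() {
	p := runtime.MustPattern(runtime.NewPattern(
		1, // version, must be 1
		[]int{
			int(utilities.OpLitPush), 0, // push literal pool[0] == "v1"
			int(utilities.OpPush), 0, // push whatever the next path segment is
			int(utilities.OpConcatN), 1, // fold the last pushed segment into one value
			int(utilities.OpCapture), 1, // bind it to the variable pool[1] == "name"
		},
		[]string{"v1", "name"},
		"", // no trailing :verb
	))

	bindings, err := p.Match([]string{"v1", "prometheus"}, "")
	fmt.Println(p.String(), bindings, err) // /v1/{name=*} map[name:prometheus] <nil>
}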
vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto2_convert.go (generated, vendored): 80 lines deleted
@@ -1,80 +0,0 @@
|
|||
package runtime
|
||||
|
||||
import (
|
||||
"github.com/golang/protobuf/proto"
|
||||
)
|
||||
|
||||
// StringP returns a pointer to a string whose pointee is same as the given string value.
|
||||
func StringP(val string) (*string, error) {
|
||||
return proto.String(val), nil
|
||||
}
|
||||
|
||||
// BoolP parses the given string representation of a boolean value,
|
||||
// and returns a pointer to a bool whose value is same as the parsed value.
|
||||
func BoolP(val string) (*bool, error) {
|
||||
b, err := Bool(val)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return proto.Bool(b), nil
|
||||
}
|
||||
|
||||
// Float64P parses the given string representation of a floating point number,
|
||||
// and returns a pointer to a float64 whose value is same as the parsed number.
|
||||
func Float64P(val string) (*float64, error) {
|
||||
f, err := Float64(val)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return proto.Float64(f), nil
|
||||
}
|
||||
|
||||
// Float32P parses the given string representation of a floating point number,
|
||||
// and returns a pointer to a float32 whose value is same as the parsed number.
|
||||
func Float32P(val string) (*float32, error) {
|
||||
f, err := Float32(val)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return proto.Float32(f), nil
|
||||
}
|
||||
|
||||
// Int64P parses the given string representation of an integer
|
||||
// and returns a pointer to a int64 whose value is same as the parsed integer.
|
||||
func Int64P(val string) (*int64, error) {
|
||||
i, err := Int64(val)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return proto.Int64(i), nil
|
||||
}
|
||||
|
||||
// Int32P parses the given string representation of an integer
|
||||
// and returns a pointer to a int32 whose value is same as the parsed integer.
|
||||
func Int32P(val string) (*int32, error) {
|
||||
i, err := Int32(val)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return proto.Int32(i), err
|
||||
}
|
||||
|
||||
// Uint64P parses the given string representation of an integer
|
||||
// and returns a pointer to a uint64 whose value is same as the parsed integer.
|
||||
func Uint64P(val string) (*uint64, error) {
|
||||
i, err := Uint64(val)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return proto.Uint64(i), err
|
||||
}
|
||||
|
||||
// Uint32P parses the given string representation of an integer
|
||||
// and returns a pointer to a uint32 whose value is same as the parsed integer.
|
||||
func Uint32P(val string) (*uint32, error) {
|
||||
i, err := Uint32(val)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return proto.Uint32(i), err
|
||||
}
|
vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto_errors.go (generated, vendored): 106 lines deleted
@@ -1,106 +0,0 @@
|
|||
package runtime
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"net/http"
|
||||
|
||||
"github.com/golang/protobuf/ptypes/any"
|
||||
"github.com/grpc-ecosystem/grpc-gateway/internal"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/grpclog"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
// StreamErrorHandlerFunc accepts an error as a gRPC error generated via status package and translates it into a
|
||||
// a proto struct used to represent error at the end of a stream.
|
||||
type StreamErrorHandlerFunc func(context.Context, error) *StreamError
|
||||
|
||||
// StreamError is the payload for the final message in a server stream in the event that the server returns an
|
||||
// error after a response message has already been sent.
|
||||
type StreamError internal.StreamError
|
||||
|
||||
// ProtoErrorHandlerFunc handles the error as a gRPC error generated via status package and replies to the request.
|
||||
type ProtoErrorHandlerFunc func(context.Context, *ServeMux, Marshaler, http.ResponseWriter, *http.Request, error)
|
||||
|
||||
var _ ProtoErrorHandlerFunc = DefaultHTTPProtoErrorHandler
|
||||
|
||||
// DefaultHTTPProtoErrorHandler is an implementation of HTTPError.
|
||||
// If "err" is an error from gRPC system, the function replies with the status code mapped by HTTPStatusFromCode.
|
||||
// If otherwise, it replies with http.StatusInternalServerError.
|
||||
//
|
||||
// The response body returned by this function is a Status message marshaled by a Marshaler.
|
||||
//
|
||||
// Do not set this function to HTTPError variable directly, use WithProtoErrorHandler option instead.
|
||||
func DefaultHTTPProtoErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, _ *http.Request, err error) {
|
||||
// return Internal when Marshal failed
|
||||
const fallback = `{"code": 13, "message": "failed to marshal error message"}`
|
||||
|
||||
s, ok := status.FromError(err)
|
||||
if !ok {
|
||||
s = status.New(codes.Unknown, err.Error())
|
||||
}
|
||||
|
||||
w.Header().Del("Trailer")
|
||||
|
||||
contentType := marshaler.ContentType()
|
||||
// Check marshaler on run time in order to keep backwards compatibility
|
||||
// An interface param needs to be added to the ContentType() function on
|
||||
// the Marshal interface to be able to remove this check
|
||||
if typeMarshaler, ok := marshaler.(contentTypeMarshaler); ok {
|
||||
pb := s.Proto()
|
||||
contentType = typeMarshaler.ContentTypeFromMessage(pb)
|
||||
}
|
||||
w.Header().Set("Content-Type", contentType)
|
||||
|
||||
buf, merr := marshaler.Marshal(s.Proto())
|
||||
if merr != nil {
|
||||
grpclog.Infof("Failed to marshal error message %q: %v", s.Proto(), merr)
|
||||
w.WriteHeader(http.StatusInternalServerError)
|
||||
if _, err := io.WriteString(w, fallback); err != nil {
|
||||
grpclog.Infof("Failed to write response: %v", err)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
md, ok := ServerMetadataFromContext(ctx)
|
||||
if !ok {
|
||||
grpclog.Infof("Failed to extract ServerMetadata from context")
|
||||
}
|
||||
|
||||
handleForwardResponseServerMetadata(w, mux, md)
|
||||
handleForwardResponseTrailerHeader(w, md)
|
||||
st := HTTPStatusFromCode(s.Code())
|
||||
w.WriteHeader(st)
|
||||
if _, err := w.Write(buf); err != nil {
|
||||
grpclog.Infof("Failed to write response: %v", err)
|
||||
}
|
||||
|
||||
handleForwardResponseTrailer(w, md)
|
||||
}
|
||||
|
||||
// DefaultHTTPStreamErrorHandler converts the given err into a *StreamError via
|
||||
// default logic.
|
||||
//
|
||||
// It extracts the gRPC status from err if possible. The fields of the status are
|
||||
// used to populate the returned StreamError, and the HTTP status code is derived
|
||||
// from the gRPC code via HTTPStatusFromCode. If the given err does not contain a
|
||||
// gRPC status, an "Unknown" gRPC code is used and "Internal Server Error" HTTP code.
|
||||
func DefaultHTTPStreamErrorHandler(_ context.Context, err error) *StreamError {
|
||||
grpcCode := codes.Unknown
|
||||
grpcMessage := err.Error()
|
||||
var grpcDetails []*any.Any
|
||||
if s, ok := status.FromError(err); ok {
|
||||
grpcCode = s.Code()
|
||||
grpcMessage = s.Message()
|
||||
grpcDetails = s.Proto().GetDetails()
|
||||
}
|
||||
httpCode := HTTPStatusFromCode(grpcCode)
|
||||
return &StreamError{
|
||||
GrpcCode: int32(grpcCode),
|
||||
HttpCode: int32(httpCode),
|
||||
Message: grpcMessage,
|
||||
HttpStatus: http.StatusText(httpCode),
|
||||
Details: grpcDetails,
|
||||
}
|
||||
}
|
vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/query.go (generated, vendored): 406 lines deleted
@@ -1,406 +0,0 @@
|
|||
package runtime
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"reflect"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
"github.com/grpc-ecosystem/grpc-gateway/utilities"
|
||||
"google.golang.org/grpc/grpclog"
|
||||
)
|
||||
|
||||
var valuesKeyRegexp = regexp.MustCompile("^(.*)\\[(.*)\\]$")
|
||||
|
||||
var currentQueryParser QueryParameterParser = &defaultQueryParser{}
|
||||
|
||||
// QueryParameterParser defines interface for all query parameter parsers
|
||||
type QueryParameterParser interface {
|
||||
Parse(msg proto.Message, values url.Values, filter *utilities.DoubleArray) error
|
||||
}
|
||||
|
||||
// PopulateQueryParameters parses query parameters
|
||||
// into "msg" using current query parser
|
||||
func PopulateQueryParameters(msg proto.Message, values url.Values, filter *utilities.DoubleArray) error {
|
||||
return currentQueryParser.Parse(msg, values, filter)
|
||||
}
|
||||
|
||||
type defaultQueryParser struct{}
|
||||
|
||||
// Parse populates "values" into "msg".
|
||||
// A value is ignored if its key starts with one of the elements in "filter".
|
||||
func (*defaultQueryParser) Parse(msg proto.Message, values url.Values, filter *utilities.DoubleArray) error {
|
||||
for key, values := range values {
|
||||
match := valuesKeyRegexp.FindStringSubmatch(key)
|
||||
if len(match) == 3 {
|
||||
key = match[1]
|
||||
values = append([]string{match[2]}, values...)
|
||||
}
|
||||
fieldPath := strings.Split(key, ".")
|
||||
if filter.HasCommonPrefix(fieldPath) {
|
||||
continue
|
||||
}
|
||||
if err := populateFieldValueFromPath(msg, fieldPath, values); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// PopulateFieldFromPath sets a value in a nested Protobuf structure.
|
||||
// It instantiates missing protobuf fields as it goes.
|
||||
func PopulateFieldFromPath(msg proto.Message, fieldPathString string, value string) error {
|
||||
fieldPath := strings.Split(fieldPathString, ".")
|
||||
return populateFieldValueFromPath(msg, fieldPath, []string{value})
|
||||
}
|
||||
|
||||
func populateFieldValueFromPath(msg proto.Message, fieldPath []string, values []string) error {
|
||||
m := reflect.ValueOf(msg)
|
||||
if m.Kind() != reflect.Ptr {
|
||||
return fmt.Errorf("unexpected type %T: %v", msg, msg)
|
||||
}
|
||||
var props *proto.Properties
|
||||
m = m.Elem()
|
||||
for i, fieldName := range fieldPath {
|
||||
isLast := i == len(fieldPath)-1
|
||||
if !isLast && m.Kind() != reflect.Struct {
|
||||
return fmt.Errorf("non-aggregate type in the mid of path: %s", strings.Join(fieldPath, "."))
|
||||
}
|
||||
var f reflect.Value
|
||||
var err error
|
||||
f, props, err = fieldByProtoName(m, fieldName)
|
||||
if err != nil {
|
||||
return err
|
||||
} else if !f.IsValid() {
|
||||
grpclog.Infof("field not found in %T: %s", msg, strings.Join(fieldPath, "."))
|
||||
return nil
|
||||
}
|
||||
|
||||
switch f.Kind() {
|
||||
case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, reflect.String, reflect.Uint32, reflect.Uint64:
|
||||
if !isLast {
|
||||
return fmt.Errorf("unexpected nested field %s in %s", fieldPath[i+1], strings.Join(fieldPath[:i+1], "."))
|
||||
}
|
||||
m = f
|
||||
case reflect.Slice:
|
||||
if !isLast {
|
||||
return fmt.Errorf("unexpected repeated field in %s", strings.Join(fieldPath, "."))
|
||||
}
|
||||
// Handle []byte
|
||||
if f.Type().Elem().Kind() == reflect.Uint8 {
|
||||
m = f
|
||||
break
|
||||
}
|
||||
return populateRepeatedField(f, values, props)
|
||||
case reflect.Ptr:
|
||||
if f.IsNil() {
|
||||
m = reflect.New(f.Type().Elem())
|
||||
f.Set(m.Convert(f.Type()))
|
||||
}
|
||||
m = f.Elem()
|
||||
continue
|
||||
case reflect.Struct:
|
||||
m = f
|
||||
continue
|
||||
case reflect.Map:
|
||||
if !isLast {
|
||||
return fmt.Errorf("unexpected nested field %s in %s", fieldPath[i+1], strings.Join(fieldPath[:i+1], "."))
|
||||
}
|
||||
return populateMapField(f, values, props)
|
||||
default:
|
||||
return fmt.Errorf("unexpected type %s in %T", f.Type(), msg)
|
||||
}
|
||||
}
|
||||
switch len(values) {
|
||||
case 0:
|
||||
return fmt.Errorf("no value of field: %s", strings.Join(fieldPath, "."))
|
||||
case 1:
|
||||
default:
|
||||
grpclog.Infof("too many field values: %s", strings.Join(fieldPath, "."))
|
||||
}
|
||||
return populateField(m, values[0], props)
|
||||
}
|
||||
|
||||
// fieldByProtoName looks up a field whose corresponding protobuf field name is "name".
|
||||
// "m" must be a struct value. It returns zero reflect.Value if no such field found.
|
||||
func fieldByProtoName(m reflect.Value, name string) (reflect.Value, *proto.Properties, error) {
|
||||
props := proto.GetProperties(m.Type())
|
||||
|
||||
// look up field name in oneof map
|
||||
for _, op := range props.OneofTypes {
|
||||
if name == op.Prop.OrigName || name == op.Prop.JSONName {
|
||||
v := reflect.New(op.Type.Elem())
|
||||
field := m.Field(op.Field)
|
||||
if !field.IsNil() {
|
||||
return reflect.Value{}, nil, fmt.Errorf("field already set for %s oneof", props.Prop[op.Field].OrigName)
|
||||
}
|
||||
field.Set(v)
|
||||
return v.Elem().Field(0), op.Prop, nil
|
||||
}
|
||||
}
|
||||
|
||||
for _, p := range props.Prop {
|
||||
if p.OrigName == name {
|
||||
return m.FieldByName(p.Name), p, nil
|
||||
}
|
||||
if p.JSONName == name {
|
||||
return m.FieldByName(p.Name), p, nil
|
||||
}
|
||||
}
|
||||
return reflect.Value{}, nil, nil
|
||||
}
|
||||
|
||||
func populateMapField(f reflect.Value, values []string, props *proto.Properties) error {
|
||||
if len(values) != 2 {
|
||||
return fmt.Errorf("more than one value provided for key %s in map %s", values[0], props.Name)
|
||||
}
|
||||
|
||||
key, value := values[0], values[1]
|
||||
keyType := f.Type().Key()
|
||||
valueType := f.Type().Elem()
|
||||
if f.IsNil() {
|
||||
f.Set(reflect.MakeMap(f.Type()))
|
||||
}
|
||||
|
||||
keyConv, ok := convFromType[keyType.Kind()]
|
||||
if !ok {
|
||||
return fmt.Errorf("unsupported key type %s in map %s", keyType, props.Name)
|
||||
}
|
||||
valueConv, ok := convFromType[valueType.Kind()]
|
||||
if !ok {
|
||||
return fmt.Errorf("unsupported value type %s in map %s", valueType, props.Name)
|
||||
}
|
||||
|
||||
keyV := keyConv.Call([]reflect.Value{reflect.ValueOf(key)})
|
||||
if err := keyV[1].Interface(); err != nil {
|
||||
return err.(error)
|
||||
}
|
||||
valueV := valueConv.Call([]reflect.Value{reflect.ValueOf(value)})
|
||||
if err := valueV[1].Interface(); err != nil {
|
||||
return err.(error)
|
||||
}
|
||||
|
||||
f.SetMapIndex(keyV[0].Convert(keyType), valueV[0].Convert(valueType))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func populateRepeatedField(f reflect.Value, values []string, props *proto.Properties) error {
|
||||
elemType := f.Type().Elem()
|
||||
|
||||
// is the destination field a slice of an enumeration type?
|
||||
if enumValMap := proto.EnumValueMap(props.Enum); enumValMap != nil {
|
||||
return populateFieldEnumRepeated(f, values, enumValMap)
|
||||
}
|
||||
|
||||
conv, ok := convFromType[elemType.Kind()]
|
||||
if !ok {
|
||||
return fmt.Errorf("unsupported field type %s", elemType)
|
||||
}
|
||||
f.Set(reflect.MakeSlice(f.Type(), len(values), len(values)).Convert(f.Type()))
|
||||
for i, v := range values {
|
||||
result := conv.Call([]reflect.Value{reflect.ValueOf(v)})
|
||||
if err := result[1].Interface(); err != nil {
|
||||
return err.(error)
|
||||
}
|
||||
f.Index(i).Set(result[0].Convert(f.Index(i).Type()))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func populateField(f reflect.Value, value string, props *proto.Properties) error {
|
||||
i := f.Addr().Interface()
|
||||
|
||||
// Handle protobuf well known types
|
||||
var name string
|
||||
switch m := i.(type) {
|
||||
case interface{ XXX_WellKnownType() string }:
|
||||
name = m.XXX_WellKnownType()
|
||||
case proto.Message:
|
||||
const wktPrefix = "google.protobuf."
|
||||
if fullName := proto.MessageName(m); strings.HasPrefix(fullName, wktPrefix) {
|
||||
name = fullName[len(wktPrefix):]
|
||||
}
|
||||
}
|
||||
switch name {
|
||||
case "Timestamp":
|
||||
if value == "null" {
|
||||
f.FieldByName("Seconds").SetInt(0)
|
||||
f.FieldByName("Nanos").SetInt(0)
|
||||
return nil
|
||||
}
|
||||
|
||||
t, err := time.Parse(time.RFC3339Nano, value)
|
||||
if err != nil {
|
||||
return fmt.Errorf("bad Timestamp: %v", err)
|
||||
}
|
||||
f.FieldByName("Seconds").SetInt(int64(t.Unix()))
|
||||
f.FieldByName("Nanos").SetInt(int64(t.Nanosecond()))
|
||||
return nil
|
||||
case "Duration":
|
||||
if value == "null" {
|
||||
f.FieldByName("Seconds").SetInt(0)
|
||||
f.FieldByName("Nanos").SetInt(0)
|
||||
return nil
|
||||
}
|
||||
d, err := time.ParseDuration(value)
|
||||
if err != nil {
|
||||
return fmt.Errorf("bad Duration: %v", err)
|
||||
}
|
||||
|
||||
ns := d.Nanoseconds()
|
||||
s := ns / 1e9
|
||||
ns %= 1e9
|
||||
f.FieldByName("Seconds").SetInt(s)
|
||||
f.FieldByName("Nanos").SetInt(ns)
|
||||
return nil
|
||||
case "DoubleValue":
|
||||
fallthrough
|
||||
case "FloatValue":
|
||||
float64Val, err := strconv.ParseFloat(value, 64)
|
||||
if err != nil {
|
||||
return fmt.Errorf("bad DoubleValue: %s", value)
|
||||
}
|
||||
f.FieldByName("Value").SetFloat(float64Val)
|
||||
return nil
|
||||
case "Int64Value":
|
||||
fallthrough
|
||||
case "Int32Value":
|
||||
int64Val, err := strconv.ParseInt(value, 10, 64)
|
||||
if err != nil {
|
||||
return fmt.Errorf("bad DoubleValue: %s", value)
|
||||
}
|
||||
f.FieldByName("Value").SetInt(int64Val)
|
||||
return nil
|
||||
case "UInt64Value":
|
||||
fallthrough
|
||||
case "UInt32Value":
|
||||
uint64Val, err := strconv.ParseUint(value, 10, 64)
|
||||
if err != nil {
|
||||
return fmt.Errorf("bad DoubleValue: %s", value)
|
||||
}
|
||||
f.FieldByName("Value").SetUint(uint64Val)
|
||||
return nil
|
||||
case "BoolValue":
|
||||
if value == "true" {
|
||||
f.FieldByName("Value").SetBool(true)
|
||||
} else if value == "false" {
|
||||
f.FieldByName("Value").SetBool(false)
|
||||
} else {
|
||||
return fmt.Errorf("bad BoolValue: %s", value)
|
||||
}
|
||||
return nil
|
||||
case "StringValue":
|
||||
f.FieldByName("Value").SetString(value)
|
||||
return nil
|
||||
case "BytesValue":
|
||||
bytesVal, err := base64.StdEncoding.DecodeString(value)
|
||||
if err != nil {
|
||||
return fmt.Errorf("bad BytesValue: %s", value)
|
||||
}
|
||||
f.FieldByName("Value").SetBytes(bytesVal)
|
||||
return nil
|
||||
case "FieldMask":
|
||||
p := f.FieldByName("Paths")
|
||||
for _, v := range strings.Split(value, ",") {
|
||||
if v != "" {
|
||||
p.Set(reflect.Append(p, reflect.ValueOf(v)))
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Handle Time and Duration stdlib types
|
||||
switch t := i.(type) {
|
||||
case *time.Time:
|
||||
pt, err := time.Parse(time.RFC3339Nano, value)
|
||||
if err != nil {
|
||||
return fmt.Errorf("bad Timestamp: %v", err)
|
||||
}
|
||||
*t = pt
|
||||
return nil
|
||||
case *time.Duration:
|
||||
d, err := time.ParseDuration(value)
|
||||
if err != nil {
|
||||
return fmt.Errorf("bad Duration: %v", err)
|
||||
}
|
||||
*t = d
|
||||
return nil
|
||||
}
|
||||
|
||||
// is the destination field an enumeration type?
|
||||
if enumValMap := proto.EnumValueMap(props.Enum); enumValMap != nil {
|
||||
return populateFieldEnum(f, value, enumValMap)
|
||||
}
|
||||
|
||||
conv, ok := convFromType[f.Kind()]
|
||||
if !ok {
|
||||
return fmt.Errorf("field type %T is not supported in query parameters", i)
|
||||
}
|
||||
result := conv.Call([]reflect.Value{reflect.ValueOf(value)})
|
||||
if err := result[1].Interface(); err != nil {
|
||||
return err.(error)
|
||||
}
|
||||
f.Set(result[0].Convert(f.Type()))
|
||||
return nil
|
||||
}
|
||||
|
||||
func convertEnum(value string, t reflect.Type, enumValMap map[string]int32) (reflect.Value, error) {
|
||||
// see if it's an enumeration string
|
||||
if enumVal, ok := enumValMap[value]; ok {
|
||||
return reflect.ValueOf(enumVal).Convert(t), nil
|
||||
}
|
||||
|
||||
// check for an integer that matches an enumeration value
|
||||
eVal, err := strconv.Atoi(value)
|
||||
if err != nil {
|
||||
return reflect.Value{}, fmt.Errorf("%s is not a valid %s", value, t)
|
||||
}
|
||||
for _, v := range enumValMap {
|
||||
if v == int32(eVal) {
|
||||
return reflect.ValueOf(eVal).Convert(t), nil
|
||||
}
|
||||
}
|
||||
return reflect.Value{}, fmt.Errorf("%s is not a valid %s", value, t)
|
||||
}
|
||||
|
||||
func populateFieldEnum(f reflect.Value, value string, enumValMap map[string]int32) error {
|
||||
cval, err := convertEnum(value, f.Type(), enumValMap)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
f.Set(cval)
|
||||
return nil
|
||||
}
|
||||
|
||||
func populateFieldEnumRepeated(f reflect.Value, values []string, enumValMap map[string]int32) error {
|
||||
elemType := f.Type().Elem()
|
||||
f.Set(reflect.MakeSlice(f.Type(), len(values), len(values)).Convert(f.Type()))
|
||||
for i, v := range values {
|
||||
result, err := convertEnum(v, elemType, enumValMap)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
f.Index(i).Set(result)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
var (
|
||||
convFromType = map[reflect.Kind]reflect.Value{
|
||||
reflect.String: reflect.ValueOf(String),
|
||||
reflect.Bool: reflect.ValueOf(Bool),
|
||||
reflect.Float64: reflect.ValueOf(Float64),
|
||||
reflect.Float32: reflect.ValueOf(Float32),
|
||||
reflect.Int64: reflect.ValueOf(Int64),
|
||||
reflect.Int32: reflect.ValueOf(Int32),
|
||||
reflect.Uint64: reflect.ValueOf(Uint64),
|
||||
reflect.Uint32: reflect.ValueOf(Uint32),
|
||||
reflect.Slice: reflect.ValueOf(Bytes),
|
||||
}
|
||||
)
|
vendor/github.com/soheilhy/cmux/.gitignore (generated, vendored): 24 lines deleted
@@ -1,24 +0,0 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so

# Folders
_obj
_test

# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out

*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*

_testmain.go

*.exe
*.test
*.prof
vendor/github.com/soheilhy/cmux/.travis.yml (generated, vendored): 29 lines deleted
@@ -1,29 +0,0 @@
language: go

go:
  - 1.6
  - 1.7
  - 1.8
  - tip

matrix:
  allow_failures:
    - go: tip

gobuild_args: -race

before_install:
  - if [[ $TRAVIS_GO_VERSION == 1.6* ]]; then go get -u github.com/kisielk/errcheck; fi
  - if [[ $TRAVIS_GO_VERSION == 1.6* ]]; then go get -u github.com/golang/lint/golint; fi

before_script:
  - '! gofmt -s -l . | read'
  - echo $TRAVIS_GO_VERSION
  - if [[ $TRAVIS_GO_VERSION == 1.6* ]]; then golint ./...; fi
  - if [[ $TRAVIS_GO_VERSION == 1.6* ]]; then errcheck ./...; fi
  - if [[ $TRAVIS_GO_VERSION == 1.6* ]]; then go tool vet .; fi
  - if [[ $TRAVIS_GO_VERSION == 1.6* ]]; then go tool vet --shadow .; fi

script:
  - go test -bench . -v ./...
  - go test -race -bench . -v ./...
vendor/github.com/soheilhy/cmux/CONTRIBUTORS (generated, vendored): 12 lines deleted
@@ -1,12 +0,0 @@
# The list of people who have contributed code to the cmux repository.
#
# Auto-generated with:
# git log --oneline --pretty=format:'%an <%aE>' | sort -u
#
Andreas Jaekle <andreas@jaekle.net>
Dmitri Shuralyov <shurcooL@gmail.com>
Ethan Mosbaugh <emosbaugh@gmail.com>
Soheil Hassas Yeganeh <soheil.h.y@gmail.com>
Soheil Hassas Yeganeh <soheil@cs.toronto.edu>
Tamir Duberstein <tamir@cockroachlabs.com>
Tamir Duberstein <tamird@gmail.com>
vendor/github.com/soheilhy/cmux/LICENSE (generated, vendored): 202 lines deleted
@@ -1,202 +0,0 @@
[The standard Apache License, Version 2.0 text (202 lines) was removed along with the vendored cmux package.]

vendor/github.com/soheilhy/cmux/README.md (generated, vendored; 83 lines deleted)
@@ -1,83 +0,0 @@
# cmux: Connection Mux ![Travis Build Status](https://api.travis-ci.org/soheilhy/args.svg?branch=master "Travis Build Status") [![GoDoc](https://godoc.org/github.com/soheilhy/cmux?status.svg)](http://godoc.org/github.com/soheilhy/cmux)

cmux is a generic Go library to multiplex connections based on
their payload. Using cmux, you can serve gRPC, SSH, HTTPS, HTTP,
Go RPC, and pretty much any other protocol on the same TCP listener.

## How-To
Simply create your main listener, create a cmux for that listener,
and then match connections:
```go
// Create the main listener.
l, err := net.Listen("tcp", ":23456")
if err != nil {
	log.Fatal(err)
}

// Create a cmux.
m := cmux.New(l)

// Match connections in order:
// First grpc, then HTTP, and otherwise Go RPC/TCP.
grpcL := m.Match(cmux.HTTP2HeaderField("content-type", "application/grpc"))
httpL := m.Match(cmux.HTTP1Fast())
trpcL := m.Match(cmux.Any()) // Any means anything that is not yet matched.

// Create your protocol servers.
grpcS := grpc.NewServer()
grpchello.RegisterGreeterServer(grpcS, &server{})

httpS := &http.Server{
	Handler: &helloHTTP1Handler{},
}

trpcS := rpc.NewServer()
trpcS.Register(&ExampleRPCRcvr{})

// Use the muxed listeners for your servers.
go grpcS.Serve(grpcL)
go httpS.Serve(httpL)
go trpcS.Accept(trpcL)

// Start serving!
m.Serve()
```

Take a look at [other examples in the GoDoc](http://godoc.org/github.com/soheilhy/cmux/#pkg-examples).

## Docs
* [GoDocs](https://godoc.org/github.com/soheilhy/cmux)

## Performance
There is room for improvement but, since we are only matching
the very first bytes of a connection, the performance overhead on
long-lived connections (i.e., RPCs and pipelined HTTP streams)
is negligible.

*TODO(soheil)*: Add benchmarks.

## Limitations
* *TLS*: `net/http` uses a type assertion to identify TLS connections; since
cmux's lookahead-implementing connection wraps the underlying TLS connection,
this type assertion fails.
Because of that, you can serve HTTPS using cmux but `http.Request.TLS`
would not be set in your handlers.

* *Different Protocols on The Same Connection*: `cmux` matches the connection
when it's accepted. For example, one connection can be either gRPC or REST, but
not both. That is, we assume that a client connection is either used for gRPC
or REST.

* *Java gRPC Clients*: The Java gRPC client blocks until it receives a SETTINGS
frame from the server. If you are using the Java client to connect to a cmux'ed
gRPC server, please match with writers:
```go
grpcl := m.MatchWithWriters(cmux.HTTP2MatchHeaderFieldSendSettings("content-type", "application/grpc"))
```

# Copyright and License
Copyright 2016 The CMux Authors. All rights reserved.

See [CONTRIBUTORS](https://github.com/soheilhy/cmux/blob/master/CONTRIBUTORS)
for the CMux Authors. Code is released under
[the Apache 2 license](https://github.com/soheilhy/cmux/blob/master/LICENSE).

vendor/github.com/soheilhy/cmux/buffer.go (generated, vendored; 67 lines deleted)
@@ -1,67 +0,0 @@
// Copyright 2016 The CMux Authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the License.

package cmux

import (
	"bytes"
	"io"
)

// bufferedReader is an optimized implementation of io.Reader that behaves like
// ```
// io.MultiReader(bytes.NewReader(buffer.Bytes()), io.TeeReader(source, buffer))
// ```
// without allocating.
type bufferedReader struct {
	source     io.Reader
	buffer     bytes.Buffer
	bufferRead int
	bufferSize int
	sniffing   bool
	lastErr    error
}

func (s *bufferedReader) Read(p []byte) (int, error) {
	if s.bufferSize > s.bufferRead {
		// If we have already read something from the buffer before, we return the
		// same data and the last error if any. We need to immediately return,
		// otherwise we may block for ever, if we try to be smart and call
		// source.Read() seeking a little bit of more data.
		bn := copy(p, s.buffer.Bytes()[s.bufferRead:s.bufferSize])
		s.bufferRead += bn
		return bn, s.lastErr
	} else if !s.sniffing && s.buffer.Cap() != 0 {
		// We don't need the buffer anymore.
		// Reset it to release the internal slice.
		s.buffer = bytes.Buffer{}
	}

	// If there is nothing more to return in the sniffed buffer, read from the
	// source.
	sn, sErr := s.source.Read(p)
	if sn > 0 && s.sniffing {
		s.lastErr = sErr
		if wn, wErr := s.buffer.Write(p[:sn]); wErr != nil {
			return wn, wErr
		}
	}
	return sn, sErr
}

func (s *bufferedReader) reset(snif bool) {
	s.sniffing = snif
	s.bufferRead = 0
	s.bufferSize = s.buffer.Len()
}

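For reference, the deleted `bufferedReader` documents itself as an allocation-free equivalent of an `io.MultiReader`/`io.TeeReader` composition. The following standalone sketch shows that plain-stdlib composition; the sample request string is made up and used only for illustration.

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"
)

func main() {
	// Hypothetical connection payload, for illustration only.
	source := strings.NewReader("GET / HTTP/1.1\r\n")
	var sniffBuf bytes.Buffer

	// While sniffing, every byte read from source is also copied into sniffBuf.
	sniffer := io.TeeReader(source, &sniffBuf)
	head := make([]byte, 4)
	if _, err := io.ReadFull(sniffer, head); err != nil {
		panic(err)
	}
	fmt.Printf("sniffed:  %q\n", head) // "GET "

	// After sniffing, replay the buffered bytes ahead of the rest of the
	// stream so the real consumer sees the connection from its beginning.
	replay := io.MultiReader(bytes.NewReader(sniffBuf.Bytes()), source)
	rest, err := io.ReadAll(replay)
	if err != nil {
		panic(err)
	}
	fmt.Printf("replayed: %q\n", rest) // "GET / HTTP/1.1\r\n"
}
```
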
vendor/github.com/soheilhy/cmux/cmux.go (generated, vendored; 270 lines deleted)
@@ -1,270 +0,0 @@
|
|||
// Copyright 2016 The CMux Authors. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
// implied. See the License for the specific language governing
|
||||
// permissions and limitations under the License.
|
||||
|
||||
package cmux
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Matcher matches a connection based on its content.
|
||||
type Matcher func(io.Reader) bool
|
||||
|
||||
// MatchWriter is a match that can also write response (say to do handshake).
|
||||
type MatchWriter func(io.Writer, io.Reader) bool
|
||||
|
||||
// ErrorHandler handles an error and returns whether
|
||||
// the mux should continue serving the listener.
|
||||
type ErrorHandler func(error) bool
|
||||
|
||||
var _ net.Error = ErrNotMatched{}
|
||||
|
||||
// ErrNotMatched is returned whenever a connection is not matched by any of
|
||||
// the matchers registered in the multiplexer.
|
||||
type ErrNotMatched struct {
|
||||
c net.Conn
|
||||
}
|
||||
|
||||
func (e ErrNotMatched) Error() string {
|
||||
return fmt.Sprintf("mux: connection %v not matched by an matcher",
|
||||
e.c.RemoteAddr())
|
||||
}
|
||||
|
||||
// Temporary implements the net.Error interface.
|
||||
func (e ErrNotMatched) Temporary() bool { return true }
|
||||
|
||||
// Timeout implements the net.Error interface.
|
||||
func (e ErrNotMatched) Timeout() bool { return false }
|
||||
|
||||
type errListenerClosed string
|
||||
|
||||
func (e errListenerClosed) Error() string { return string(e) }
|
||||
func (e errListenerClosed) Temporary() bool { return false }
|
||||
func (e errListenerClosed) Timeout() bool { return false }
|
||||
|
||||
// ErrListenerClosed is returned from muxListener.Accept when the underlying
|
||||
// listener is closed.
|
||||
var ErrListenerClosed = errListenerClosed("mux: listener closed")
|
||||
|
||||
// for readability of readTimeout
|
||||
var noTimeout time.Duration
|
||||
|
||||
// New instantiates a new connection multiplexer.
|
||||
func New(l net.Listener) CMux {
|
||||
return &cMux{
|
||||
root: l,
|
||||
bufLen: 1024,
|
||||
errh: func(_ error) bool { return true },
|
||||
donec: make(chan struct{}),
|
||||
readTimeout: noTimeout,
|
||||
}
|
||||
}
|
||||
|
||||
// CMux is a multiplexer for network connections.
|
||||
type CMux interface {
|
||||
// Match returns a net.Listener that sees (i.e., accepts) only
|
||||
// the connections matched by at least one of the matcher.
|
||||
//
|
||||
// The order used to call Match determines the priority of matchers.
|
||||
Match(...Matcher) net.Listener
|
||||
// MatchWithWriters returns a net.Listener that accepts only the
|
||||
// connections that matched by at least of the matcher writers.
|
||||
//
|
||||
// Prefer Matchers over MatchWriters, since the latter can write on the
|
||||
// connection before the actual handler.
|
||||
//
|
||||
// The order used to call Match determines the priority of matchers.
|
||||
MatchWithWriters(...MatchWriter) net.Listener
|
||||
// Serve starts multiplexing the listener. Serve blocks and perhaps
|
||||
// should be invoked concurrently within a go routine.
|
||||
Serve() error
|
||||
// HandleError registers an error handler that handles listener errors.
|
||||
HandleError(ErrorHandler)
|
||||
// sets a timeout for the read of matchers
|
||||
SetReadTimeout(time.Duration)
|
||||
}
|
||||
|
||||
type matchersListener struct {
|
||||
ss []MatchWriter
|
||||
l muxListener
|
||||
}
|
||||
|
||||
type cMux struct {
|
||||
root net.Listener
|
||||
bufLen int
|
||||
errh ErrorHandler
|
||||
donec chan struct{}
|
||||
sls []matchersListener
|
||||
readTimeout time.Duration
|
||||
}
|
||||
|
||||
func matchersToMatchWriters(matchers []Matcher) []MatchWriter {
|
||||
mws := make([]MatchWriter, 0, len(matchers))
|
||||
for _, m := range matchers {
|
||||
cm := m
|
||||
mws = append(mws, func(w io.Writer, r io.Reader) bool {
|
||||
return cm(r)
|
||||
})
|
||||
}
|
||||
return mws
|
||||
}
|
||||
|
||||
func (m *cMux) Match(matchers ...Matcher) net.Listener {
|
||||
mws := matchersToMatchWriters(matchers)
|
||||
return m.MatchWithWriters(mws...)
|
||||
}
|
||||
|
||||
func (m *cMux) MatchWithWriters(matchers ...MatchWriter) net.Listener {
|
||||
ml := muxListener{
|
||||
Listener: m.root,
|
||||
connc: make(chan net.Conn, m.bufLen),
|
||||
}
|
||||
m.sls = append(m.sls, matchersListener{ss: matchers, l: ml})
|
||||
return ml
|
||||
}
|
||||
|
||||
func (m *cMux) SetReadTimeout(t time.Duration) {
|
||||
m.readTimeout = t
|
||||
}
|
||||
|
||||
func (m *cMux) Serve() error {
|
||||
var wg sync.WaitGroup
|
||||
|
||||
defer func() {
|
||||
close(m.donec)
|
||||
wg.Wait()
|
||||
|
||||
for _, sl := range m.sls {
|
||||
close(sl.l.connc)
|
||||
// Drain the connections enqueued for the listener.
|
||||
for c := range sl.l.connc {
|
||||
_ = c.Close()
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
for {
|
||||
c, err := m.root.Accept()
|
||||
if err != nil {
|
||||
if !m.handleErr(err) {
|
||||
return err
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
wg.Add(1)
|
||||
go m.serve(c, m.donec, &wg)
|
||||
}
|
||||
}
|
||||
|
||||
func (m *cMux) serve(c net.Conn, donec <-chan struct{}, wg *sync.WaitGroup) {
|
||||
defer wg.Done()
|
||||
|
||||
muc := newMuxConn(c)
|
||||
if m.readTimeout > noTimeout {
|
||||
_ = c.SetReadDeadline(time.Now().Add(m.readTimeout))
|
||||
}
|
||||
for _, sl := range m.sls {
|
||||
for _, s := range sl.ss {
|
||||
matched := s(muc.Conn, muc.startSniffing())
|
||||
if matched {
|
||||
muc.doneSniffing()
|
||||
if m.readTimeout > noTimeout {
|
||||
_ = c.SetReadDeadline(time.Time{})
|
||||
}
|
||||
select {
|
||||
case sl.l.connc <- muc:
|
||||
case <-donec:
|
||||
_ = c.Close()
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
_ = c.Close()
|
||||
err := ErrNotMatched{c: c}
|
||||
if !m.handleErr(err) {
|
||||
_ = m.root.Close()
|
||||
}
|
||||
}
|
||||
|
||||
func (m *cMux) HandleError(h ErrorHandler) {
|
||||
m.errh = h
|
||||
}
|
||||
|
||||
func (m *cMux) handleErr(err error) bool {
|
||||
if !m.errh(err) {
|
||||
return false
|
||||
}
|
||||
|
||||
if ne, ok := err.(net.Error); ok {
|
||||
return ne.Temporary()
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
type muxListener struct {
|
||||
net.Listener
|
||||
connc chan net.Conn
|
||||
}
|
||||
|
||||
func (l muxListener) Accept() (net.Conn, error) {
|
||||
c, ok := <-l.connc
|
||||
if !ok {
|
||||
return nil, ErrListenerClosed
|
||||
}
|
||||
return c, nil
|
||||
}
|
||||
|
||||
// MuxConn wraps a net.Conn and provides transparent sniffing of connection data.
|
||||
type MuxConn struct {
|
||||
net.Conn
|
||||
buf bufferedReader
|
||||
}
|
||||
|
||||
func newMuxConn(c net.Conn) *MuxConn {
|
||||
return &MuxConn{
|
||||
Conn: c,
|
||||
buf: bufferedReader{source: c},
|
||||
}
|
||||
}
|
||||
|
||||
// From the io.Reader documentation:
|
||||
//
|
||||
// When Read encounters an error or end-of-file condition after
|
||||
// successfully reading n > 0 bytes, it returns the number of
|
||||
// bytes read. It may return the (non-nil) error from the same call
|
||||
// or return the error (and n == 0) from a subsequent call.
|
||||
// An instance of this general case is that a Reader returning
|
||||
// a non-zero number of bytes at the end of the input stream may
|
||||
// return either err == EOF or err == nil. The next Read should
|
||||
// return 0, EOF.
|
||||
func (m *MuxConn) Read(p []byte) (int, error) {
|
||||
return m.buf.Read(p)
|
||||
}
|
||||
|
||||
func (m *MuxConn) startSniffing() io.Reader {
|
||||
m.buf.reset(true)
|
||||
return &m.buf
|
||||
}
|
||||
|
||||
func (m *MuxConn) doneSniffing() {
|
||||
m.buf.reset(false)
|
||||
}
|
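The `HandleError`/`handleErr` path in the deleted `cmux.go` above decides whether the mux keeps serving after an error; `ErrNotMatched` reports itself as temporary, so unmatched connections are dropped without stopping the mux. A minimal, hypothetical usage sketch follows; it assumes the upstream `github.com/soheilhy/cmux` module and an arbitrary loopback address.

```go
package main

import (
	"log"
	"net"

	"github.com/soheilhy/cmux"
)

func main() {
	l, err := net.Listen("tcp", "127.0.0.1:0") // illustrative address
	if err != nil {
		log.Fatal(err)
	}

	m := cmux.New(l)
	httpL := m.Match(cmux.HTTP1Fast())
	_ = httpL // hand this to an *http.Server in a real program

	m.HandleError(func(err error) bool {
		if _, ok := err.(cmux.ErrNotMatched); ok {
			log.Printf("cmux: dropped unmatched connection: %v", err)
		}
		// Returning true defers to cmux's own check: it keeps serving only
		// for temporary net.Errors (ErrNotMatched reports Temporary() == true).
		return true
	})

	log.Fatal(m.Serve())
}
```
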
vendor/github.com/soheilhy/cmux/doc.go (generated, vendored; 18 lines deleted)
@@ -1,18 +0,0 @@
// Copyright 2016 The CMux Authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing
// permissions and limitations under the License.

// Package cmux is a library to multiplex network connections based on
// their payload. Using cmux, you can serve different protocols from the
// same listener.
package cmux

vendor/github.com/soheilhy/cmux/matchers.go (generated, vendored; 267 lines deleted)
@@ -1,267 +0,0 @@
|
|||
// Copyright 2016 The CMux Authors. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
// implied. See the License for the specific language governing
|
||||
// permissions and limitations under the License.
|
||||
|
||||
package cmux
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"crypto/tls"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/net/http2"
|
||||
"golang.org/x/net/http2/hpack"
|
||||
)
|
||||
|
||||
// Any is a Matcher that matches any connection.
|
||||
func Any() Matcher {
|
||||
return func(r io.Reader) bool { return true }
|
||||
}
|
||||
|
||||
// PrefixMatcher returns a matcher that matches a connection if it
|
||||
// starts with any of the strings in strs.
|
||||
func PrefixMatcher(strs ...string) Matcher {
|
||||
pt := newPatriciaTreeString(strs...)
|
||||
return pt.matchPrefix
|
||||
}
|
||||
|
||||
func prefixByteMatcher(list ...[]byte) Matcher {
|
||||
pt := newPatriciaTree(list...)
|
||||
return pt.matchPrefix
|
||||
}
|
||||
|
||||
var defaultHTTPMethods = []string{
|
||||
"OPTIONS",
|
||||
"GET",
|
||||
"HEAD",
|
||||
"POST",
|
||||
"PUT",
|
||||
"DELETE",
|
||||
"TRACE",
|
||||
"CONNECT",
|
||||
}
|
||||
|
||||
// HTTP1Fast only matches the methods in the HTTP request.
|
||||
//
|
||||
// This matcher is very optimistic: if it returns true, it does not mean that
|
||||
// the request is a valid HTTP response. If you want a correct but slower HTTP1
|
||||
// matcher, use HTTP1 instead.
|
||||
func HTTP1Fast(extMethods ...string) Matcher {
|
||||
return PrefixMatcher(append(defaultHTTPMethods, extMethods...)...)
|
||||
}
|
||||
|
||||
// TLS matches HTTPS requests.
|
||||
//
|
||||
// By default, any TLS handshake packet is matched. An optional whitelist
|
||||
// of versions can be passed in to restrict the matcher, for example:
|
||||
// TLS(tls.VersionTLS11, tls.VersionTLS12)
|
||||
func TLS(versions ...int) Matcher {
|
||||
if len(versions) == 0 {
|
||||
versions = []int{
|
||||
tls.VersionSSL30,
|
||||
tls.VersionTLS10,
|
||||
tls.VersionTLS11,
|
||||
tls.VersionTLS12,
|
||||
}
|
||||
}
|
||||
prefixes := [][]byte{}
|
||||
for _, v := range versions {
|
||||
prefixes = append(prefixes, []byte{22, byte(v >> 8 & 0xff), byte(v & 0xff)})
|
||||
}
|
||||
return prefixByteMatcher(prefixes...)
|
||||
}
|
||||
|
||||
const maxHTTPRead = 4096
|
||||
|
||||
// HTTP1 parses the first line or upto 4096 bytes of the request to see if
|
||||
// the conection contains an HTTP request.
|
||||
func HTTP1() Matcher {
|
||||
return func(r io.Reader) bool {
|
||||
br := bufio.NewReader(&io.LimitedReader{R: r, N: maxHTTPRead})
|
||||
l, part, err := br.ReadLine()
|
||||
if err != nil || part {
|
||||
return false
|
||||
}
|
||||
|
||||
_, _, proto, ok := parseRequestLine(string(l))
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
v, _, ok := http.ParseHTTPVersion(proto)
|
||||
return ok && v == 1
|
||||
}
|
||||
}
|
||||
|
||||
// grabbed from net/http.
|
||||
func parseRequestLine(line string) (method, uri, proto string, ok bool) {
|
||||
s1 := strings.Index(line, " ")
|
||||
s2 := strings.Index(line[s1+1:], " ")
|
||||
if s1 < 0 || s2 < 0 {
|
||||
return
|
||||
}
|
||||
s2 += s1 + 1
|
||||
return line[:s1], line[s1+1 : s2], line[s2+1:], true
|
||||
}
|
||||
|
||||
// HTTP2 parses the frame header of the first frame to detect whether the
|
||||
// connection is an HTTP2 connection.
|
||||
func HTTP2() Matcher {
|
||||
return hasHTTP2Preface
|
||||
}
|
||||
|
||||
// HTTP1HeaderField returns a matcher matching the header fields of the first
|
||||
// request of an HTTP 1 connection.
|
||||
func HTTP1HeaderField(name, value string) Matcher {
|
||||
return func(r io.Reader) bool {
|
||||
return matchHTTP1Field(r, name, func(gotValue string) bool {
|
||||
return gotValue == value
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// HTTP1HeaderFieldPrefix returns a matcher matching the header fields of the
|
||||
// first request of an HTTP 1 connection. If the header with key name has a
|
||||
// value prefixed with valuePrefix, this will match.
|
||||
func HTTP1HeaderFieldPrefix(name, valuePrefix string) Matcher {
|
||||
return func(r io.Reader) bool {
|
||||
return matchHTTP1Field(r, name, func(gotValue string) bool {
|
||||
return strings.HasPrefix(gotValue, valuePrefix)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// HTTP2HeaderField returns a matcher matching the header fields of the first
|
||||
// headers frame.
|
||||
func HTTP2HeaderField(name, value string) Matcher {
|
||||
return func(r io.Reader) bool {
|
||||
return matchHTTP2Field(ioutil.Discard, r, name, func(gotValue string) bool {
|
||||
return gotValue == value
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// HTTP2HeaderFieldPrefix returns a matcher matching the header fields of the
|
||||
// first headers frame. If the header with key name has a value prefixed with
|
||||
// valuePrefix, this will match.
|
||||
func HTTP2HeaderFieldPrefix(name, valuePrefix string) Matcher {
|
||||
return func(r io.Reader) bool {
|
||||
return matchHTTP2Field(ioutil.Discard, r, name, func(gotValue string) bool {
|
||||
return strings.HasPrefix(gotValue, valuePrefix)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// HTTP2MatchHeaderFieldSendSettings matches the header field and writes the
|
||||
// settings to the server. Prefer HTTP2HeaderField over this one, if the client
|
||||
// does not block on receiving a SETTING frame.
|
||||
func HTTP2MatchHeaderFieldSendSettings(name, value string) MatchWriter {
|
||||
return func(w io.Writer, r io.Reader) bool {
|
||||
return matchHTTP2Field(w, r, name, func(gotValue string) bool {
|
||||
return gotValue == value
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// HTTP2MatchHeaderFieldPrefixSendSettings matches the header field prefix
|
||||
// and writes the settings to the server. Prefer HTTP2HeaderFieldPrefix over
|
||||
// this one, if the client does not block on receiving a SETTING frame.
|
||||
func HTTP2MatchHeaderFieldPrefixSendSettings(name, valuePrefix string) MatchWriter {
|
||||
return func(w io.Writer, r io.Reader) bool {
|
||||
return matchHTTP2Field(w, r, name, func(gotValue string) bool {
|
||||
return strings.HasPrefix(gotValue, valuePrefix)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func hasHTTP2Preface(r io.Reader) bool {
|
||||
var b [len(http2.ClientPreface)]byte
|
||||
last := 0
|
||||
|
||||
for {
|
||||
n, err := r.Read(b[last:])
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
last += n
|
||||
eq := string(b[:last]) == http2.ClientPreface[:last]
|
||||
if last == len(http2.ClientPreface) {
|
||||
return eq
|
||||
}
|
||||
if !eq {
|
||||
return false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func matchHTTP1Field(r io.Reader, name string, matches func(string) bool) (matched bool) {
|
||||
req, err := http.ReadRequest(bufio.NewReader(r))
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
return matches(req.Header.Get(name))
|
||||
}
|
||||
|
||||
func matchHTTP2Field(w io.Writer, r io.Reader, name string, matches func(string) bool) (matched bool) {
|
||||
if !hasHTTP2Preface(r) {
|
||||
return false
|
||||
}
|
||||
|
||||
done := false
|
||||
framer := http2.NewFramer(w, r)
|
||||
hdec := hpack.NewDecoder(uint32(4<<10), func(hf hpack.HeaderField) {
|
||||
if hf.Name == name {
|
||||
done = true
|
||||
if matches(hf.Value) {
|
||||
matched = true
|
||||
}
|
||||
}
|
||||
})
|
||||
for {
|
||||
f, err := framer.ReadFrame()
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
switch f := f.(type) {
|
||||
case *http2.SettingsFrame:
|
||||
// Sender acknoweldged the SETTINGS frame. No need to write
|
||||
// SETTINGS again.
|
||||
if f.IsAck() {
|
||||
break
|
||||
}
|
||||
if err := framer.WriteSettings(); err != nil {
|
||||
return false
|
||||
}
|
||||
case *http2.ContinuationFrame:
|
||||
if _, err := hdec.Write(f.HeaderBlockFragment()); err != nil {
|
||||
return false
|
||||
}
|
||||
done = done || f.FrameHeader.Flags&http2.FlagHeadersEndHeaders != 0
|
||||
case *http2.HeadersFrame:
|
||||
if _, err := hdec.Write(f.HeaderBlockFragment()); err != nil {
|
||||
return false
|
||||
}
|
||||
done = done || f.FrameHeader.Flags&http2.FlagHeadersEndHeaders != 0
|
||||
}
|
||||
|
||||
if done {
|
||||
return matched
|
||||
}
|
||||
}
|
||||
}
|
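The deleted `TLS()` matcher above recognizes a handshake by its record prefix: content type 22 followed by the two-byte protocol version. The short, self-contained sketch below reproduces that prefix construction; it is restricted to TLS 1.0–1.2 here, whereas the deleted default list also included SSL 3.0.

```go
package main

import (
	"crypto/tls"
	"fmt"
)

func main() {
	for _, v := range []int{tls.VersionTLS10, tls.VersionTLS11, tls.VersionTLS12} {
		// Same construction as the deleted matcher: a TLS handshake record
		// starts with content type 22 followed by the protocol version bytes.
		prefix := []byte{22, byte(v >> 8 & 0xff), byte(v & 0xff)}
		fmt.Printf("version 0x%04x -> prefix % x\n", v, prefix)
	}
}
```
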
vendor/github.com/soheilhy/cmux/patricia.go (generated, vendored; 179 lines deleted)
@@ -1,179 +0,0 @@
|
|||
// Copyright 2016 The CMux Authors. All rights reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
// implied. See the License for the specific language governing
|
||||
// permissions and limitations under the License.
|
||||
|
||||
package cmux
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
)
|
||||
|
||||
// patriciaTree is a simple patricia tree that handles []byte instead of string
|
||||
// and cannot be changed after instantiation.
|
||||
type patriciaTree struct {
|
||||
root *ptNode
|
||||
maxDepth int // max depth of the tree.
|
||||
}
|
||||
|
||||
func newPatriciaTree(bs ...[]byte) *patriciaTree {
|
||||
max := 0
|
||||
for _, b := range bs {
|
||||
if max < len(b) {
|
||||
max = len(b)
|
||||
}
|
||||
}
|
||||
return &patriciaTree{
|
||||
root: newNode(bs),
|
||||
maxDepth: max + 1,
|
||||
}
|
||||
}
|
||||
|
||||
func newPatriciaTreeString(strs ...string) *patriciaTree {
|
||||
b := make([][]byte, len(strs))
|
||||
for i, s := range strs {
|
||||
b[i] = []byte(s)
|
||||
}
|
||||
return newPatriciaTree(b...)
|
||||
}
|
||||
|
||||
func (t *patriciaTree) matchPrefix(r io.Reader) bool {
|
||||
buf := make([]byte, t.maxDepth)
|
||||
n, _ := io.ReadFull(r, buf)
|
||||
return t.root.match(buf[:n], true)
|
||||
}
|
||||
|
||||
func (t *patriciaTree) match(r io.Reader) bool {
|
||||
buf := make([]byte, t.maxDepth)
|
||||
n, _ := io.ReadFull(r, buf)
|
||||
return t.root.match(buf[:n], false)
|
||||
}
|
||||
|
||||
type ptNode struct {
|
||||
prefix []byte
|
||||
next map[byte]*ptNode
|
||||
terminal bool
|
||||
}
|
||||
|
||||
func newNode(strs [][]byte) *ptNode {
|
||||
if len(strs) == 0 {
|
||||
return &ptNode{
|
||||
prefix: []byte{},
|
||||
terminal: true,
|
||||
}
|
||||
}
|
||||
|
||||
if len(strs) == 1 {
|
||||
return &ptNode{
|
||||
prefix: strs[0],
|
||||
terminal: true,
|
||||
}
|
||||
}
|
||||
|
||||
p, strs := splitPrefix(strs)
|
||||
n := &ptNode{
|
||||
prefix: p,
|
||||
}
|
||||
|
||||
nexts := make(map[byte][][]byte)
|
||||
for _, s := range strs {
|
||||
if len(s) == 0 {
|
||||
n.terminal = true
|
||||
continue
|
||||
}
|
||||
nexts[s[0]] = append(nexts[s[0]], s[1:])
|
||||
}
|
||||
|
||||
n.next = make(map[byte]*ptNode)
|
||||
for first, rests := range nexts {
|
||||
n.next[first] = newNode(rests)
|
||||
}
|
||||
|
||||
return n
|
||||
}
|
||||
|
||||
func splitPrefix(bss [][]byte) (prefix []byte, rest [][]byte) {
|
||||
if len(bss) == 0 || len(bss[0]) == 0 {
|
||||
return prefix, bss
|
||||
}
|
||||
|
||||
if len(bss) == 1 {
|
||||
return bss[0], [][]byte{{}}
|
||||
}
|
||||
|
||||
for i := 0; ; i++ {
|
||||
var cur byte
|
||||
eq := true
|
||||
for j, b := range bss {
|
||||
if len(b) <= i {
|
||||
eq = false
|
||||
break
|
||||
}
|
||||
|
||||
if j == 0 {
|
||||
cur = b[i]
|
||||
continue
|
||||
}
|
||||
|
||||
if cur != b[i] {
|
||||
eq = false
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if !eq {
|
||||
break
|
||||
}
|
||||
|
||||
prefix = append(prefix, cur)
|
||||
}
|
||||
|
||||
rest = make([][]byte, 0, len(bss))
|
||||
for _, b := range bss {
|
||||
rest = append(rest, b[len(prefix):])
|
||||
}
|
||||
|
||||
return prefix, rest
|
||||
}
|
||||
|
||||
func (n *ptNode) match(b []byte, prefix bool) bool {
|
||||
l := len(n.prefix)
|
||||
if l > 0 {
|
||||
if l > len(b) {
|
||||
l = len(b)
|
||||
}
|
||||
if !bytes.Equal(b[:l], n.prefix) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
if n.terminal && (prefix || len(n.prefix) == len(b)) {
|
||||
return true
|
||||
}
|
||||
|
||||
if l >= len(b) {
|
||||
return false
|
||||
}
|
||||
|
||||
nextN, ok := n.next[b[l]]
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
if l == len(b) {
|
||||
b = b[l:l]
|
||||
} else {
|
||||
b = b[l+1:]
|
||||
}
|
||||
return nextN.match(b, prefix)
|
||||
}
|
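The patricia tree above backs `PrefixMatcher` in the deleted `matchers.go`: it reads at most `maxDepth` bytes from the connection and walks the tree byte by byte. A hypothetical sketch of the resulting behaviour, assuming the upstream `github.com/soheilhy/cmux` module and made-up payloads:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/soheilhy/cmux"
)

func main() {
	// PrefixMatcher builds a patricia tree over the given prefixes.
	m := cmux.PrefixMatcher("GET ", "POST ")

	fmt.Println(m(strings.NewReader("GET / HTTP/1.1\r\n")))      // true
	fmt.Println(m(strings.NewReader("SSH-2.0-OpenSSH_8.0\r\n"))) // false
}
```
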
vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go (generated, vendored; 237 lines deleted)
@@ -1,237 +0,0 @@
|
|||
// Copyright 2020 Google LLC
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.25.0
|
||||
// protoc v3.12.3
|
||||
// source: google/api/httpbody.proto
|
||||
|
||||
package httpbody
|
||||
|
||||
import (
|
||||
reflect "reflect"
|
||||
sync "sync"
|
||||
|
||||
proto "github.com/golang/protobuf/proto"
|
||||
any "github.com/golang/protobuf/ptypes/any"
|
||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||
)
|
||||
|
||||
const (
|
||||
// Verify that this generated code is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
|
||||
// Verify that runtime/protoimpl is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
|
||||
)
|
||||
|
||||
// This is a compile-time assertion that a sufficiently up-to-date version
|
||||
// of the legacy proto package is being used.
|
||||
const _ = proto.ProtoPackageIsVersion4
|
||||
|
||||
// Message that represents an arbitrary HTTP body. It should only be used for
|
||||
// payload formats that can't be represented as JSON, such as raw binary or
|
||||
// an HTML page.
|
||||
//
|
||||
//
|
||||
// This message can be used both in streaming and non-streaming API methods in
|
||||
// the request as well as the response.
|
||||
//
|
||||
// It can be used as a top-level request field, which is convenient if one
|
||||
// wants to extract parameters from either the URL or HTTP template into the
|
||||
// request fields and also want access to the raw HTTP body.
|
||||
//
|
||||
// Example:
|
||||
//
|
||||
// message GetResourceRequest {
|
||||
// // A unique request id.
|
||||
// string request_id = 1;
|
||||
//
|
||||
// // The raw HTTP body is bound to this field.
|
||||
// google.api.HttpBody http_body = 2;
|
||||
// }
|
||||
//
|
||||
// service ResourceService {
|
||||
// rpc GetResource(GetResourceRequest) returns (google.api.HttpBody);
|
||||
// rpc UpdateResource(google.api.HttpBody) returns
|
||||
// (google.protobuf.Empty);
|
||||
// }
|
||||
//
|
||||
// Example with streaming methods:
|
||||
//
|
||||
// service CaldavService {
|
||||
// rpc GetCalendar(stream google.api.HttpBody)
|
||||
// returns (stream google.api.HttpBody);
|
||||
// rpc UpdateCalendar(stream google.api.HttpBody)
|
||||
// returns (stream google.api.HttpBody);
|
||||
// }
|
||||
//
|
||||
// Use of this type only changes how the request and response bodies are
|
||||
// handled, all other features will continue to work unchanged.
|
||||
type HttpBody struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
// The HTTP Content-Type header value specifying the content type of the body.
|
||||
ContentType string `protobuf:"bytes,1,opt,name=content_type,json=contentType,proto3" json:"content_type,omitempty"`
|
||||
// The HTTP request/response body as raw binary.
|
||||
Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"`
|
||||
// Application specific response metadata. Must be set in the first response
|
||||
// for streaming APIs.
|
||||
Extensions []*any.Any `protobuf:"bytes,3,rep,name=extensions,proto3" json:"extensions,omitempty"`
|
||||
}
|
||||
|
||||
func (x *HttpBody) Reset() {
|
||||
*x = HttpBody{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_google_api_httpbody_proto_msgTypes[0]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *HttpBody) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*HttpBody) ProtoMessage() {}
|
||||
|
||||
func (x *HttpBody) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_google_api_httpbody_proto_msgTypes[0]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use HttpBody.ProtoReflect.Descriptor instead.
|
||||
func (*HttpBody) Descriptor() ([]byte, []int) {
|
||||
return file_google_api_httpbody_proto_rawDescGZIP(), []int{0}
|
||||
}
|
||||
|
||||
func (x *HttpBody) GetContentType() string {
|
||||
if x != nil {
|
||||
return x.ContentType
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *HttpBody) GetData() []byte {
|
||||
if x != nil {
|
||||
return x.Data
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *HttpBody) GetExtensions() []*any.Any {
|
||||
if x != nil {
|
||||
return x.Extensions
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
var File_google_api_httpbody_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_google_api_httpbody_proto_rawDesc = []byte{
|
||||
0x0a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x68, 0x74, 0x74,
|
||||
0x70, 0x62, 0x6f, 0x64, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f,
|
||||
0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f,
|
||||
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f,
|
||||
0x74, 0x6f, 0x22, 0x77, 0x0a, 0x08, 0x48, 0x74, 0x74, 0x70, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x21,
|
||||
0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01,
|
||||
0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70,
|
||||
0x65, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52,
|
||||
0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x34, 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69,
|
||||
0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
|
||||
0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52,
|
||||
0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x68, 0x0a, 0x0e, 0x63,
|
||||
0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, 0x0d, 0x48,
|
||||
0x74, 0x74, 0x70, 0x42, 0x6f, 0x64, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3b,
|
||||
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72,
|
||||
0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
|
||||
0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x68, 0x74, 0x74, 0x70, 0x62, 0x6f,
|
||||
0x64, 0x79, 0x3b, 0x68, 0x74, 0x74, 0x70, 0x62, 0x6f, 0x64, 0x79, 0xf8, 0x01, 0x01, 0xa2, 0x02,
|
||||
0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
}
|
||||
|
||||
var (
|
||||
file_google_api_httpbody_proto_rawDescOnce sync.Once
|
||||
file_google_api_httpbody_proto_rawDescData = file_google_api_httpbody_proto_rawDesc
|
||||
)
|
||||
|
||||
func file_google_api_httpbody_proto_rawDescGZIP() []byte {
|
||||
file_google_api_httpbody_proto_rawDescOnce.Do(func() {
|
||||
file_google_api_httpbody_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_httpbody_proto_rawDescData)
|
||||
})
|
||||
return file_google_api_httpbody_proto_rawDescData
|
||||
}
|
||||
|
||||
var file_google_api_httpbody_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
|
||||
var file_google_api_httpbody_proto_goTypes = []interface{}{
|
||||
(*HttpBody)(nil), // 0: google.api.HttpBody
|
||||
(*any.Any)(nil), // 1: google.protobuf.Any
|
||||
}
|
||||
var file_google_api_httpbody_proto_depIdxs = []int32{
|
||||
1, // 0: google.api.HttpBody.extensions:type_name -> google.protobuf.Any
|
||||
1, // [1:1] is the sub-list for method output_type
|
||||
1, // [1:1] is the sub-list for method input_type
|
||||
1, // [1:1] is the sub-list for extension type_name
|
||||
1, // [1:1] is the sub-list for extension extendee
|
||||
0, // [0:1] is the sub-list for field type_name
|
||||
}
|
||||
|
||||
func init() { file_google_api_httpbody_proto_init() }
|
||||
func file_google_api_httpbody_proto_init() {
|
||||
if File_google_api_httpbody_proto != nil {
|
||||
return
|
||||
}
|
||||
if !protoimpl.UnsafeEnabled {
|
||||
file_google_api_httpbody_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*HttpBody); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
type x struct{}
|
||||
out := protoimpl.TypeBuilder{
|
||||
File: protoimpl.DescBuilder{
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_google_api_httpbody_proto_rawDesc,
|
||||
NumEnums: 0,
|
||||
NumMessages: 1,
|
||||
NumExtensions: 0,
|
||||
NumServices: 0,
|
||||
},
|
||||
GoTypes: file_google_api_httpbody_proto_goTypes,
|
||||
DependencyIndexes: file_google_api_httpbody_proto_depIdxs,
|
||||
MessageInfos: file_google_api_httpbody_proto_msgTypes,
|
||||
}.Build()
|
||||
File_google_api_httpbody_proto = out.File
|
||||
file_google_api_httpbody_proto_rawDesc = nil
|
||||
file_google_api_httpbody_proto_goTypes = nil
|
||||
file_google_api_httpbody_proto_depIdxs = nil
|
||||
}
|
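The removed `HttpBody` message above carries a raw payload plus its content type, as its field comments state. A minimal, illustrative construction; the payload is made up, and the import path is the one shown in the deleted file.

```go
package main

import (
	"fmt"

	"google.golang.org/genproto/googleapis/api/httpbody"
)

func main() {
	body := &httpbody.HttpBody{
		ContentType: "text/html",               // Content-Type of the raw payload
		Data:        []byte("<h1>hello</h1>"), // the payload itself, as raw bytes
	}
	fmt.Println(body.GetContentType(), len(body.GetData()))
}
```
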
vendor/google.golang.org/genproto/protobuf/field_mask/field_mask.go (generated, vendored; 23 lines deleted)
@@ -1,23 +0,0 @@
// Copyright 2020 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package field_mask aliases all exported identifiers in
// package "google.golang.org/protobuf/types/known/fieldmaskpb".
package field_mask

import "google.golang.org/protobuf/types/known/fieldmaskpb"

type FieldMask = fieldmaskpb.FieldMask

var File_google_protobuf_field_mask_proto = fieldmaskpb.File_google_protobuf_field_mask_proto

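Since the deleted `field_mask` package only aliases `fieldmaskpb`, a mask built through either import path is the same type. A small illustrative sketch; the paths are taken from the mask examples in the generated file below.

```go
package main

import (
	"fmt"

	"google.golang.org/genproto/protobuf/field_mask"
)

func main() {
	// field_mask.FieldMask is an alias for fieldmaskpb.FieldMask.
	mask := &field_mask.FieldMask{Paths: []string{"f.b", "f.c"}}
	fmt.Println(mask.GetPaths())
}
```
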
vendor/google.golang.org/protobuf/types/known/fieldmaskpb/field_mask.pb.go (generated, vendored; 371 lines deleted)
@@ -1,371 +0,0 @@
|
|||
// Protocol Buffers - Google's data interchange format
|
||||
// Copyright 2008 Google Inc. All rights reserved.
|
||||
// https://developers.google.com/protocol-buffers/
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// source: google/protobuf/field_mask.proto
|
||||
|
||||
package fieldmaskpb
|
||||
|
||||
import (
|
||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||
reflect "reflect"
|
||||
sync "sync"
|
||||
)
|
||||
|
||||
// `FieldMask` represents a set of symbolic field paths, for example:
|
||||
//
|
||||
// paths: "f.a"
|
||||
// paths: "f.b.d"
|
||||
//
|
||||
// Here `f` represents a field in some root message, `a` and `b`
|
||||
// fields in the message found in `f`, and `d` a field found in the
|
||||
// message in `f.b`.
|
||||
//
|
||||
// Field masks are used to specify a subset of fields that should be
|
||||
// returned by a get operation or modified by an update operation.
|
||||
// Field masks also have a custom JSON encoding (see below).
|
||||
//
|
||||
// # Field Masks in Projections
|
||||
//
|
||||
// When used in the context of a projection, a response message or
|
||||
// sub-message is filtered by the API to only contain those fields as
|
||||
// specified in the mask. For example, if the mask in the previous
|
||||
// example is applied to a response message as follows:
|
||||
//
|
||||
// f {
|
||||
// a : 22
|
||||
// b {
|
||||
// d : 1
|
||||
// x : 2
|
||||
// }
|
||||
// y : 13
|
||||
// }
|
||||
// z: 8
|
||||
//
|
||||
// The result will not contain specific values for fields x,y and z
|
||||
// (their value will be set to the default, and omitted in proto text
|
||||
// output):
|
||||
//
|
||||
//
|
||||
// f {
|
||||
// a : 22
|
||||
// b {
|
||||
// d : 1
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// A repeated field is not allowed except at the last position of a
|
||||
// paths string.
|
||||
//
|
||||
// If a FieldMask object is not present in a get operation, the
|
||||
// operation applies to all fields (as if a FieldMask of all fields
|
||||
// had been specified).
|
||||
//
|
||||
// Note that a field mask does not necessarily apply to the
|
||||
// top-level response message. In case of a REST get operation, the
|
||||
// field mask applies directly to the response, but in case of a REST
|
||||
// list operation, the mask instead applies to each individual message
|
||||
// in the returned resource list. In case of a REST custom method,
|
||||
// other definitions may be used. Where the mask applies will be
|
||||
// clearly documented together with its declaration in the API. In
|
||||
// any case, the effect on the returned resource/resources is required
|
||||
// behavior for APIs.
|
||||
//
|
||||
// # Field Masks in Update Operations
|
||||
//
|
||||
// A field mask in update operations specifies which fields of the
|
||||
// targeted resource are going to be updated. The API is required
|
||||
// to only change the values of the fields as specified in the mask
|
||||
// and leave the others untouched. If a resource is passed in to
|
||||
// describe the updated values, the API ignores the values of all
|
||||
// fields not covered by the mask.
|
||||
//
|
||||
// If a repeated field is specified for an update operation, new values will
|
||||
// be appended to the existing repeated field in the target resource. Note that
|
||||
// a repeated field is only allowed in the last position of a `paths` string.
|
||||
//
|
||||
// If a sub-message is specified in the last position of the field mask for an
|
||||
// update operation, then new value will be merged into the existing sub-message
|
||||
// in the target resource.
|
||||
//
|
||||
// For example, given the target message:
|
||||
//
|
||||
// f {
|
||||
// b {
|
||||
// d: 1
|
||||
// x: 2
|
||||
// }
|
||||
// c: [1]
|
||||
// }
|
||||
//
|
||||
// And an update message:
|
||||
//
|
||||
// f {
|
||||
// b {
|
||||
// d: 10
|
||||
// }
|
||||
// c: [2]
|
||||
// }
|
||||
//
|
||||
// then if the field mask is:
|
||||
//
|
||||
// paths: ["f.b", "f.c"]
|
||||
//
|
||||
// then the result will be:
|
||||
//
|
||||
// f {
|
||||
// b {
|
||||
// d: 10
|
||||
// x: 2
|
||||
// }
|
||||
// c: [1, 2]
|
||||
// }
|
||||
//
|
||||
// An implementation may provide options to override this default behavior for
|
||||
// repeated and message fields.
|
||||
//
|
||||
// In order to reset a field's value to the default, the field must
|
||||
// be in the mask and set to the default value in the provided resource.
|
||||
// Hence, in order to reset all fields of a resource, provide a default
|
||||
// instance of the resource and set all fields in the mask, or do
|
||||
// not provide a mask as described below.
|
||||
//
|
||||
// If a field mask is not present on update, the operation applies to
|
||||
// all fields (as if a field mask of all fields has been specified).
|
||||
// Note that in the presence of schema evolution, this may mean that
|
||||
// fields the client does not know and has therefore not filled into
|
||||
// the request will be reset to their default. If this is unwanted
|
||||
// behavior, a specific service may require a client to always specify
|
||||
// a field mask, producing an error if not.
|
||||
//
|
||||
// As with get operations, the location of the resource which
|
||||
// describes the updated values in the request message depends on the
|
||||
// operation kind. In any case, the effect of the field mask is
|
||||
// required to be honored by the API.
|
||||
//
|
||||
// ## Considerations for HTTP REST
|
||||
//
|
||||
// The HTTP kind of an update operation which uses a field mask must
|
||||
// be set to PATCH instead of PUT in order to satisfy HTTP semantics
|
||||
// (PUT must only be used for full updates).
|
||||
//
|
||||
// # JSON Encoding of Field Masks
|
||||
//
|
||||
// In JSON, a field mask is encoded as a single string where paths are
|
||||
// separated by a comma. Fields name in each path are converted
|
||||
// to/from lower-camel naming conventions.
|
||||
//
|
||||
// As an example, consider the following message declarations:
|
||||
//
|
||||
// message Profile {
|
||||
// User user = 1;
|
||||
// Photo photo = 2;
|
||||
// }
|
||||
// message User {
|
||||
// string display_name = 1;
|
||||
// string address = 2;
|
||||
// }
|
||||
//
|
||||
// In proto a field mask for `Profile` may look as such:
|
||||
//
|
||||
// mask {
|
||||
// paths: "user.display_name"
|
||||
// paths: "photo"
|
||||
// }
|
||||
//
|
||||
// In JSON, the same mask is represented as below:
|
||||
//
|
||||
// {
|
||||
// mask: "user.displayName,photo"
|
||||
// }
|
||||
//
|
||||
// # Field Masks and Oneof Fields
|
||||
//
|
||||
// Field masks treat fields in oneofs just as regular fields. Consider the
|
||||
// following message:
|
||||
//
|
||||
// message SampleMessage {
|
||||
// oneof test_oneof {
|
||||
// string name = 4;
|
||||
// SubMessage sub_message = 9;
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// The field mask can be:
|
||||
//
|
||||
// mask {
|
||||
// paths: "name"
|
||||
// }
|
||||
//
|
||||
// Or:
|
||||
//
|
||||
// mask {
|
||||
// paths: "sub_message"
|
||||
// }
|
||||
//
|
||||
// Note that oneof type names ("test_oneof" in this case) cannot be used in
|
||||
// paths.
|
||||
//
|
||||
// ## Field Mask Verification
|
||||
//
|
||||
// The implementation of any API method which has a FieldMask type field in the
|
||||
// request should verify the included field paths, and return an
|
||||
// `INVALID_ARGUMENT` error if any path is unmappable.
|
||||
type FieldMask struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
// The set of field mask paths.
|
||||
Paths []string `protobuf:"bytes,1,rep,name=paths,proto3" json:"paths,omitempty"`
|
||||
}
|
||||
|
||||
func (x *FieldMask) Reset() {
|
||||
*x = FieldMask{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_google_protobuf_field_mask_proto_msgTypes[0]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *FieldMask) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*FieldMask) ProtoMessage() {}
|
||||
|
||||
func (x *FieldMask) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_google_protobuf_field_mask_proto_msgTypes[0]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use FieldMask.ProtoReflect.Descriptor instead.
|
||||
func (*FieldMask) Descriptor() ([]byte, []int) {
|
||||
return file_google_protobuf_field_mask_proto_rawDescGZIP(), []int{0}
|
||||
}
|
||||
|
||||
func (x *FieldMask) GetPaths() []string {
|
||||
if x != nil {
|
||||
return x.Paths
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
var File_google_protobuf_field_mask_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_google_protobuf_field_mask_proto_rawDesc = []byte{
|
||||
0x0a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
|
||||
0x66, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x2e, 0x70, 0x72, 0x6f,
|
||||
0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
|
||||
0x62, 0x75, 0x66, 0x22, 0x21, 0x0a, 0x09, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b,
|
||||
0x12, 0x14, 0x0a, 0x05, 0x70, 0x61, 0x74, 0x68, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52,
|
||||
0x05, 0x70, 0x61, 0x74, 0x68, 0x73, 0x42, 0x8c, 0x01, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67,
|
||||
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0e,
|
||||
0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01,
|
||||
0x5a, 0x39, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e,
|
||||
0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x72, 0x6f,
|
||||
0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b,
|
||||
0x3b, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0xf8, 0x01, 0x01, 0xa2, 0x02,
|
||||
0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72,
|
||||
0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e,
|
||||
0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
}
|
||||
|
||||
var (
|
||||
file_google_protobuf_field_mask_proto_rawDescOnce sync.Once
|
||||
file_google_protobuf_field_mask_proto_rawDescData = file_google_protobuf_field_mask_proto_rawDesc
|
||||
)
|
||||
|
||||
func file_google_protobuf_field_mask_proto_rawDescGZIP() []byte {
|
||||
file_google_protobuf_field_mask_proto_rawDescOnce.Do(func() {
|
||||
file_google_protobuf_field_mask_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_field_mask_proto_rawDescData)
|
||||
})
|
||||
return file_google_protobuf_field_mask_proto_rawDescData
|
||||
}
|
||||
|
||||
var file_google_protobuf_field_mask_proto_msgTypes = make([]protoimpl.MessageInfo, 1)
|
||||
var file_google_protobuf_field_mask_proto_goTypes = []interface{}{
|
||||
(*FieldMask)(nil), // 0: google.protobuf.FieldMask
|
||||
}
|
||||
var file_google_protobuf_field_mask_proto_depIdxs = []int32{
|
||||
0, // [0:0] is the sub-list for method output_type
|
||||
0, // [0:0] is the sub-list for method input_type
|
||||
0, // [0:0] is the sub-list for extension type_name
|
||||
0, // [0:0] is the sub-list for extension extendee
|
||||
0, // [0:0] is the sub-list for field type_name
|
||||
}
|
||||
|
||||
func init() { file_google_protobuf_field_mask_proto_init() }
|
||||
func file_google_protobuf_field_mask_proto_init() {
|
||||
if File_google_protobuf_field_mask_proto != nil {
|
||||
return
|
||||
}
|
||||
if !protoimpl.UnsafeEnabled {
|
||||
file_google_protobuf_field_mask_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*FieldMask); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
type x struct{}
|
||||
out := protoimpl.TypeBuilder{
|
||||
File: protoimpl.DescBuilder{
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_google_protobuf_field_mask_proto_rawDesc,
|
||||
NumEnums: 0,
|
||||
NumMessages: 1,
|
||||
NumExtensions: 0,
|
||||
NumServices: 0,
|
||||
},
|
||||
GoTypes: file_google_protobuf_field_mask_proto_goTypes,
|
||||
DependencyIndexes: file_google_protobuf_field_mask_proto_depIdxs,
|
||||
MessageInfos: file_google_protobuf_field_mask_proto_msgTypes,
|
||||
}.Build()
|
||||
File_google_protobuf_field_mask_proto = out.File
|
||||
file_google_protobuf_field_mask_proto_rawDesc = nil
|
||||
file_google_protobuf_field_mask_proto_goTypes = nil
|
||||
file_google_protobuf_field_mask_proto_depIdxs = nil
|
||||
}
|
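The doc comment above explains field-mask semantics at length (projections, update merging, and the comma-separated lower-camel JSON form). As a minimal sketch of how the `fieldmaskpb` package being removed here is used, assuming only the `FieldMask` struct shown in this diff and that the module is importable:

package main

import (
	"fmt"

	fieldmaskpb "google.golang.org/protobuf/types/known/fieldmaskpb"
)

func main() {
	// Mirror the Profile example from the doc comment: select the user's
	// display name and the whole photo sub-message.
	mask := &fieldmaskpb.FieldMask{Paths: []string{"user.display_name", "photo"}}

	// In JSON this mask is encoded as the single string
	// "user.displayName,photo" (lower-camel paths, comma-separated).
	fmt.Println(mask.GetPaths())
}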
vendor/google.golang.org/protobuf/types/known/wrapperspb/wrappers.pb.go (generated, vendored): 714 lines removed
|
@ -1,714 +0,0 @@
|
|||
// Protocol Buffers - Google's data interchange format
|
||||
// Copyright 2008 Google Inc. All rights reserved.
|
||||
// https://developers.google.com/protocol-buffers/
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
//
|
||||
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
// Wrappers for primitive (non-message) types. These types are useful
|
||||
// for embedding primitives in the `google.protobuf.Any` type and for places
|
||||
// where we need to distinguish between the absence of a primitive
|
||||
// typed field and its default value.
|
||||
//
|
||||
// These wrappers have no meaningful use within repeated fields as they lack
|
||||
// the ability to detect presence on individual elements.
|
||||
// These wrappers have no meaningful use within a map or a oneof since
|
||||
// individual entries of a map or fields of a oneof can already detect presence.
|
||||
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// source: google/protobuf/wrappers.proto
|
||||
|
||||
package wrapperspb
|
||||
|
||||
import (
|
||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||
reflect "reflect"
|
||||
sync "sync"
|
||||
)
|
||||
|
||||
// Wrapper message for `double`.
|
||||
//
|
||||
// The JSON representation for `DoubleValue` is JSON number.
|
||||
type DoubleValue struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
// The double value.
|
||||
Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"`
|
||||
}
|
||||
|
||||
func (x *DoubleValue) Reset() {
|
||||
*x = DoubleValue{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_google_protobuf_wrappers_proto_msgTypes[0]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *DoubleValue) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*DoubleValue) ProtoMessage() {}
|
||||
|
||||
func (x *DoubleValue) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_google_protobuf_wrappers_proto_msgTypes[0]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use DoubleValue.ProtoReflect.Descriptor instead.
|
||||
func (*DoubleValue) Descriptor() ([]byte, []int) {
|
||||
return file_google_protobuf_wrappers_proto_rawDescGZIP(), []int{0}
|
||||
}
|
||||
|
||||
func (x *DoubleValue) GetValue() float64 {
|
||||
if x != nil {
|
||||
return x.Value
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// Wrapper message for `float`.
|
||||
//
|
||||
// The JSON representation for `FloatValue` is JSON number.
|
||||
type FloatValue struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
// The float value.
|
||||
Value float32 `protobuf:"fixed32,1,opt,name=value,proto3" json:"value,omitempty"`
|
||||
}
|
||||
|
||||
func (x *FloatValue) Reset() {
|
||||
*x = FloatValue{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_google_protobuf_wrappers_proto_msgTypes[1]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *FloatValue) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*FloatValue) ProtoMessage() {}
|
||||
|
||||
func (x *FloatValue) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_google_protobuf_wrappers_proto_msgTypes[1]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use FloatValue.ProtoReflect.Descriptor instead.
|
||||
func (*FloatValue) Descriptor() ([]byte, []int) {
|
||||
return file_google_protobuf_wrappers_proto_rawDescGZIP(), []int{1}
|
||||
}
|
||||
|
||||
func (x *FloatValue) GetValue() float32 {
|
||||
if x != nil {
|
||||
return x.Value
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// Wrapper message for `int64`.
|
||||
//
|
||||
// The JSON representation for `Int64Value` is JSON string.
|
||||
type Int64Value struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
// The int64 value.
|
||||
Value int64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
|
||||
}
|
||||
|
||||
func (x *Int64Value) Reset() {
|
||||
*x = Int64Value{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_google_protobuf_wrappers_proto_msgTypes[2]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *Int64Value) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*Int64Value) ProtoMessage() {}
|
||||
|
||||
func (x *Int64Value) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_google_protobuf_wrappers_proto_msgTypes[2]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use Int64Value.ProtoReflect.Descriptor instead.
|
||||
func (*Int64Value) Descriptor() ([]byte, []int) {
|
||||
return file_google_protobuf_wrappers_proto_rawDescGZIP(), []int{2}
|
||||
}
|
||||
|
||||
func (x *Int64Value) GetValue() int64 {
|
||||
if x != nil {
|
||||
return x.Value
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// Wrapper message for `uint64`.
|
||||
//
|
||||
// The JSON representation for `UInt64Value` is JSON string.
|
||||
type UInt64Value struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
// The uint64 value.
|
||||
Value uint64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
|
||||
}
|
||||
|
||||
func (x *UInt64Value) Reset() {
|
||||
*x = UInt64Value{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_google_protobuf_wrappers_proto_msgTypes[3]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *UInt64Value) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*UInt64Value) ProtoMessage() {}
|
||||
|
||||
func (x *UInt64Value) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_google_protobuf_wrappers_proto_msgTypes[3]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use UInt64Value.ProtoReflect.Descriptor instead.
|
||||
func (*UInt64Value) Descriptor() ([]byte, []int) {
|
||||
return file_google_protobuf_wrappers_proto_rawDescGZIP(), []int{3}
|
||||
}
|
||||
|
||||
func (x *UInt64Value) GetValue() uint64 {
|
||||
if x != nil {
|
||||
return x.Value
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// Wrapper message for `int32`.
|
||||
//
|
||||
// The JSON representation for `Int32Value` is JSON number.
|
||||
type Int32Value struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
// The int32 value.
|
||||
Value int32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
|
||||
}
|
||||
|
||||
func (x *Int32Value) Reset() {
|
||||
*x = Int32Value{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_google_protobuf_wrappers_proto_msgTypes[4]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *Int32Value) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*Int32Value) ProtoMessage() {}
|
||||
|
||||
func (x *Int32Value) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_google_protobuf_wrappers_proto_msgTypes[4]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use Int32Value.ProtoReflect.Descriptor instead.
|
||||
func (*Int32Value) Descriptor() ([]byte, []int) {
|
||||
return file_google_protobuf_wrappers_proto_rawDescGZIP(), []int{4}
|
||||
}
|
||||
|
||||
func (x *Int32Value) GetValue() int32 {
|
||||
if x != nil {
|
||||
return x.Value
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// Wrapper message for `uint32`.
|
||||
//
|
||||
// The JSON representation for `UInt32Value` is JSON number.
|
||||
type UInt32Value struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
// The uint32 value.
|
||||
Value uint32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
|
||||
}
|
||||
|
||||
func (x *UInt32Value) Reset() {
|
||||
*x = UInt32Value{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_google_protobuf_wrappers_proto_msgTypes[5]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *UInt32Value) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*UInt32Value) ProtoMessage() {}
|
||||
|
||||
func (x *UInt32Value) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_google_protobuf_wrappers_proto_msgTypes[5]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use UInt32Value.ProtoReflect.Descriptor instead.
|
||||
func (*UInt32Value) Descriptor() ([]byte, []int) {
|
||||
return file_google_protobuf_wrappers_proto_rawDescGZIP(), []int{5}
|
||||
}
|
||||
|
||||
func (x *UInt32Value) GetValue() uint32 {
|
||||
if x != nil {
|
||||
return x.Value
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// Wrapper message for `bool`.
|
||||
//
|
||||
// The JSON representation for `BoolValue` is JSON `true` and `false`.
|
||||
type BoolValue struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
// The bool value.
|
||||
Value bool `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
|
||||
}
|
||||
|
||||
func (x *BoolValue) Reset() {
|
||||
*x = BoolValue{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_google_protobuf_wrappers_proto_msgTypes[6]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *BoolValue) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*BoolValue) ProtoMessage() {}
|
||||
|
||||
func (x *BoolValue) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_google_protobuf_wrappers_proto_msgTypes[6]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use BoolValue.ProtoReflect.Descriptor instead.
|
||||
func (*BoolValue) Descriptor() ([]byte, []int) {
|
||||
return file_google_protobuf_wrappers_proto_rawDescGZIP(), []int{6}
|
||||
}
|
||||
|
||||
func (x *BoolValue) GetValue() bool {
|
||||
if x != nil {
|
||||
return x.Value
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Wrapper message for `string`.
|
||||
//
|
||||
// The JSON representation for `StringValue` is JSON string.
|
||||
type StringValue struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
// The string value.
|
||||
Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"`
|
||||
}
|
||||
|
||||
func (x *StringValue) Reset() {
|
||||
*x = StringValue{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_google_protobuf_wrappers_proto_msgTypes[7]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *StringValue) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*StringValue) ProtoMessage() {}
|
||||
|
||||
func (x *StringValue) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_google_protobuf_wrappers_proto_msgTypes[7]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use StringValue.ProtoReflect.Descriptor instead.
|
||||
func (*StringValue) Descriptor() ([]byte, []int) {
|
||||
return file_google_protobuf_wrappers_proto_rawDescGZIP(), []int{7}
|
||||
}
|
||||
|
||||
func (x *StringValue) GetValue() string {
|
||||
if x != nil {
|
||||
return x.Value
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// Wrapper message for `bytes`.
|
||||
//
|
||||
// The JSON representation for `BytesValue` is JSON string.
|
||||
type BytesValue struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
// The bytes value.
|
||||
Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"`
|
||||
}
|
||||
|
||||
func (x *BytesValue) Reset() {
|
||||
*x = BytesValue{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_google_protobuf_wrappers_proto_msgTypes[8]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *BytesValue) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*BytesValue) ProtoMessage() {}
|
||||
|
||||
func (x *BytesValue) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_google_protobuf_wrappers_proto_msgTypes[8]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use BytesValue.ProtoReflect.Descriptor instead.
|
||||
func (*BytesValue) Descriptor() ([]byte, []int) {
|
||||
return file_google_protobuf_wrappers_proto_rawDescGZIP(), []int{8}
|
||||
}
|
||||
|
||||
func (x *BytesValue) GetValue() []byte {
|
||||
if x != nil {
|
||||
return x.Value
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
var File_google_protobuf_wrappers_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_google_protobuf_wrappers_proto_rawDesc = []byte{
|
||||
0x0a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
|
||||
0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
|
||||
0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
|
||||
0x66, 0x22, 0x23, 0x0a, 0x0b, 0x44, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65,
|
||||
0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52,
|
||||
0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x22, 0x0a, 0x0a, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x56,
|
||||
0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20,
|
||||
0x01, 0x28, 0x02, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x22, 0x0a, 0x0a, 0x49, 0x6e,
|
||||
0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75,
|
||||
0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x23,
|
||||
0x0a, 0x0b, 0x55, 0x49, 0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a,
|
||||
0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x76, 0x61,
|
||||
0x6c, 0x75, 0x65, 0x22, 0x22, 0x0a, 0x0a, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x56, 0x61, 0x6c, 0x75,
|
||||
0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05,
|
||||
0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x23, 0x0a, 0x0b, 0x55, 0x49, 0x6e, 0x74, 0x33,
|
||||
0x32, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18,
|
||||
0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x21, 0x0a, 0x09,
|
||||
0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c,
|
||||
0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22,
|
||||
0x23, 0x0a, 0x0b, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x14,
|
||||
0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76,
|
||||
0x61, 0x6c, 0x75, 0x65, 0x22, 0x22, 0x0a, 0x0a, 0x42, 0x79, 0x74, 0x65, 0x73, 0x56, 0x61, 0x6c,
|
||||
0x75, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
|
||||
0x0c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x7c, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e,
|
||||
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42,
|
||||
0x0d, 0x57, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01,
|
||||
0x5a, 0x2a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
|
||||
0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
|
||||
0x70, 0x65, 0x73, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0xf8, 0x01, 0x01, 0xa2,
|
||||
0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50,
|
||||
0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77,
|
||||
0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
}
|
||||
|
||||
var (
|
||||
file_google_protobuf_wrappers_proto_rawDescOnce sync.Once
|
||||
file_google_protobuf_wrappers_proto_rawDescData = file_google_protobuf_wrappers_proto_rawDesc
|
||||
)
|
||||
|
||||
func file_google_protobuf_wrappers_proto_rawDescGZIP() []byte {
|
||||
file_google_protobuf_wrappers_proto_rawDescOnce.Do(func() {
|
||||
file_google_protobuf_wrappers_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_wrappers_proto_rawDescData)
|
||||
})
|
||||
return file_google_protobuf_wrappers_proto_rawDescData
|
||||
}
|
||||
|
||||
var file_google_protobuf_wrappers_proto_msgTypes = make([]protoimpl.MessageInfo, 9)
|
||||
var file_google_protobuf_wrappers_proto_goTypes = []interface{}{
|
||||
(*DoubleValue)(nil), // 0: google.protobuf.DoubleValue
|
||||
(*FloatValue)(nil), // 1: google.protobuf.FloatValue
|
||||
(*Int64Value)(nil), // 2: google.protobuf.Int64Value
|
||||
(*UInt64Value)(nil), // 3: google.protobuf.UInt64Value
|
||||
(*Int32Value)(nil), // 4: google.protobuf.Int32Value
|
||||
(*UInt32Value)(nil), // 5: google.protobuf.UInt32Value
|
||||
(*BoolValue)(nil), // 6: google.protobuf.BoolValue
|
||||
(*StringValue)(nil), // 7: google.protobuf.StringValue
|
||||
(*BytesValue)(nil), // 8: google.protobuf.BytesValue
|
||||
}
|
||||
var file_google_protobuf_wrappers_proto_depIdxs = []int32{
|
||||
0, // [0:0] is the sub-list for method output_type
|
||||
0, // [0:0] is the sub-list for method input_type
|
||||
0, // [0:0] is the sub-list for extension type_name
|
||||
0, // [0:0] is the sub-list for extension extendee
|
||||
0, // [0:0] is the sub-list for field type_name
|
||||
}
|
||||
|
||||
func init() { file_google_protobuf_wrappers_proto_init() }
|
||||
func file_google_protobuf_wrappers_proto_init() {
|
||||
if File_google_protobuf_wrappers_proto != nil {
|
||||
return
|
||||
}
|
||||
if !protoimpl.UnsafeEnabled {
|
||||
file_google_protobuf_wrappers_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*DoubleValue); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_google_protobuf_wrappers_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*FloatValue); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_google_protobuf_wrappers_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*Int64Value); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_google_protobuf_wrappers_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*UInt64Value); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_google_protobuf_wrappers_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*Int32Value); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_google_protobuf_wrappers_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*UInt32Value); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_google_protobuf_wrappers_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*BoolValue); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_google_protobuf_wrappers_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*StringValue); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_google_protobuf_wrappers_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*BytesValue); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
type x struct{}
|
||||
out := protoimpl.TypeBuilder{
|
||||
File: protoimpl.DescBuilder{
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_google_protobuf_wrappers_proto_rawDesc,
|
||||
NumEnums: 0,
|
||||
NumMessages: 9,
|
||||
NumExtensions: 0,
|
||||
NumServices: 0,
|
||||
},
|
||||
GoTypes: file_google_protobuf_wrappers_proto_goTypes,
|
||||
DependencyIndexes: file_google_protobuf_wrappers_proto_depIdxs,
|
||||
MessageInfos: file_google_protobuf_wrappers_proto_msgTypes,
|
||||
}.Build()
|
||||
File_google_protobuf_wrappers_proto = out.File
|
||||
file_google_protobuf_wrappers_proto_rawDesc = nil
|
||||
file_google_protobuf_wrappers_proto_goTypes = nil
|
||||
file_google_protobuf_wrappers_proto_depIdxs = nil
|
||||
}
|
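The wrapper messages above exist so a scalar can distinguish "unset" from its zero value. A minimal sketch using only the struct fields and getters shown in this diff (no helper constructors are assumed):

package main

import (
	"fmt"

	wrapperspb "google.golang.org/protobuf/types/known/wrapperspb"
)

// describeLimit treats a nil wrapper as "not configured", which a plain
// int64 field could not express because 0 would be ambiguous.
func describeLimit(limit *wrapperspb.Int64Value) string {
	if limit == nil {
		return "limit not set"
	}
	return fmt.Sprintf("limit = %d", limit.GetValue())
}

func main() {
	fmt.Println(describeLimit(nil))                                // limit not set
	fmt.Println(describeLimit(&wrapperspb.Int64Value{Value: 0}))   // limit = 0
	fmt.Println(describeLimit(&wrapperspb.Int64Value{Value: 500})) // limit = 500
}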
vendor/modules.txt (vendored): 11 lines changed
|
@ -209,7 +209,6 @@ github.com/gogo/protobuf/protoc-gen-gogo/grpc
|
|||
github.com/gogo/protobuf/protoc-gen-gogo/plugin
|
||||
github.com/gogo/protobuf/protoc-gen-gogofast
|
||||
github.com/gogo/protobuf/sortkeys
|
||||
github.com/gogo/protobuf/types
|
||||
github.com/gogo/protobuf/vanity
|
||||
github.com/gogo/protobuf/vanity/command
|
||||
# github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b
|
||||
|
@ -227,7 +226,6 @@ github.com/golang/protobuf/ptypes/any
|
|||
github.com/golang/protobuf/ptypes/duration
|
||||
github.com/golang/protobuf/ptypes/struct
|
||||
github.com/golang/protobuf/ptypes/timestamp
|
||||
github.com/golang/protobuf/ptypes/wrappers
|
||||
# github.com/golang/snappy v0.0.1
|
||||
## explicit
|
||||
github.com/golang/snappy
|
||||
|
@ -278,7 +276,6 @@ github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/internal/gengatew
|
|||
github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger
|
||||
github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/genswagger
|
||||
github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/options
|
||||
github.com/grpc-ecosystem/grpc-gateway/runtime
|
||||
github.com/grpc-ecosystem/grpc-gateway/utilities
|
||||
# github.com/hashicorp/consul/api v1.6.0
|
||||
## explicit
|
||||
|
@ -417,9 +414,6 @@ github.com/shurcooL/httpfs/vfsutil
|
|||
github.com/shurcooL/vfsgen
|
||||
# github.com/sirupsen/logrus v1.6.0
|
||||
github.com/sirupsen/logrus
|
||||
# github.com/soheilhy/cmux v0.1.4
|
||||
## explicit
|
||||
github.com/soheilhy/cmux
|
||||
# github.com/uber/jaeger-client-go v2.25.0+incompatible
|
||||
## explicit
|
||||
github.com/uber/jaeger-client-go
|
||||
|
@ -576,11 +570,8 @@ google.golang.org/appengine/urlfetch
|
|||
# google.golang.org/genproto v0.0.0-20200815001618-f69a88009b70
|
||||
## explicit
|
||||
google.golang.org/genproto/googleapis/api/annotations
|
||||
google.golang.org/genproto/googleapis/api/httpbody
|
||||
google.golang.org/genproto/googleapis/rpc/status
|
||||
google.golang.org/genproto/protobuf/field_mask
|
||||
# google.golang.org/grpc v1.29.1
|
||||
## explicit
|
||||
google.golang.org/grpc
|
||||
google.golang.org/grpc/attributes
|
||||
google.golang.org/grpc/backoff
|
||||
|
@ -655,10 +646,8 @@ google.golang.org/protobuf/runtime/protoimpl
|
|||
google.golang.org/protobuf/types/descriptorpb
|
||||
google.golang.org/protobuf/types/known/anypb
|
||||
google.golang.org/protobuf/types/known/durationpb
|
||||
google.golang.org/protobuf/types/known/fieldmaskpb
|
||||
google.golang.org/protobuf/types/known/structpb
|
||||
google.golang.org/protobuf/types/known/timestamppb
|
||||
google.golang.org/protobuf/types/known/wrapperspb
|
||||
google.golang.org/protobuf/types/pluginpb
|
||||
# gopkg.in/alecthomas/kingpin.v2 v2.2.6
|
||||
## explicit
|
||||
|
|
|
@ -1,230 +0,0 @@
|
|||
// Copyright 2017 The Prometheus Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package apiv2
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math"
|
||||
"math/rand"
|
||||
"net"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
"github.com/grpc-ecosystem/grpc-gateway/runtime"
|
||||
"github.com/pkg/errors"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
|
||||
"github.com/prometheus/prometheus/pkg/labels"
|
||||
"github.com/prometheus/prometheus/pkg/timestamp"
|
||||
pb "github.com/prometheus/prometheus/prompb"
|
||||
"github.com/prometheus/prometheus/tsdb"
|
||||
)
|
||||
|
||||
// API encapsulates all API services.
|
||||
type API struct {
|
||||
enableAdmin bool
|
||||
db TSDBAdmin
|
||||
dbDir string
|
||||
}
|
||||
|
||||
// New returns a new API object.
|
||||
func New(
|
||||
db TSDBAdmin,
|
||||
dbDir string,
|
||||
enableAdmin bool,
|
||||
) *API {
|
||||
return &API{
|
||||
db: db,
|
||||
dbDir: dbDir,
|
||||
enableAdmin: enableAdmin,
|
||||
}
|
||||
}
|
||||
|
||||
// RegisterGRPC registers all API services with the given server.
|
||||
func (api *API) RegisterGRPC(srv *grpc.Server) {
|
||||
if api.enableAdmin {
|
||||
pb.RegisterAdminServer(srv, NewAdmin(api.db, api.dbDir))
|
||||
} else {
|
||||
pb.RegisterAdminServer(srv, &AdminDisabled{})
|
||||
}
|
||||
}
|
||||
|
||||
// HTTPHandler returns an HTTP handler for a REST API gateway to the given grpc address.
|
||||
func (api *API) HTTPHandler(ctx context.Context, grpcAddr string) (http.Handler, error) {
|
||||
enc := new(runtime.JSONPb)
|
||||
mux := runtime.NewServeMux(runtime.WithMarshalerOption(enc.ContentType(), enc))
|
||||
|
||||
opts := []grpc.DialOption{
|
||||
grpc.WithInsecure(),
|
||||
// Replace the default dialer that connects through proxy when HTTP_PROXY is set.
|
||||
grpc.WithContextDialer(func(ctx context.Context, addr string) (net.Conn, error) {
|
||||
return (&net.Dialer{}).DialContext(ctx, "tcp", addr)
|
||||
}),
|
||||
}
|
||||
|
||||
err := pb.RegisterAdminHandlerFromEndpoint(ctx, mux, grpcAddr, opts)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return mux, nil
|
||||
}
|
||||
|
||||
// extractTimeRange returns minimum and maximum timestamp in milliseconds as
|
||||
// provided by the time range. It defaults either boundary to the minimum and maximum
|
||||
// possible value.
|
||||
func extractTimeRange(min, max *time.Time) (mint, maxt time.Time, err error) {
|
||||
if min == nil {
|
||||
mint = minTime
|
||||
} else {
|
||||
mint = *min
|
||||
}
|
||||
if max == nil {
|
||||
maxt = maxTime
|
||||
} else {
|
||||
maxt = *max
|
||||
}
|
||||
if mint.After(maxt) {
|
||||
return mint, maxt, errors.Errorf("min time must be before or equal to max time")
|
||||
}
|
||||
return mint, maxt, nil
|
||||
}
|
||||
|
||||
var (
|
||||
minTime = time.Unix(math.MinInt64/1000+62135596801, 0).UTC()
|
||||
maxTime = time.Unix(math.MaxInt64/1000-62135596801, 999999999).UTC()
|
||||
)
|
||||
|
||||
var (
|
||||
errAdminDisabled = status.Error(codes.Unavailable, "Admin APIs are disabled")
|
||||
errTSDBNotReady = status.Error(codes.Unavailable, "TSDB not ready")
|
||||
)
|
||||
|
||||
// AdminDisabled implements the administration interface that informs
|
||||
// that the API endpoints are disabled.
|
||||
type AdminDisabled struct {
|
||||
}
|
||||
|
||||
// TSDBSnapshot implements pb.AdminServer.
|
||||
func (s *AdminDisabled) TSDBSnapshot(_ context.Context, _ *pb.TSDBSnapshotRequest) (*pb.TSDBSnapshotResponse, error) {
|
||||
return nil, errAdminDisabled
|
||||
}
|
||||
|
||||
// TSDBCleanTombstones implements pb.AdminServer.
|
||||
func (s *AdminDisabled) TSDBCleanTombstones(_ context.Context, _ *pb.TSDBCleanTombstonesRequest) (*pb.TSDBCleanTombstonesResponse, error) {
|
||||
return nil, errAdminDisabled
|
||||
}
|
||||
|
||||
// DeleteSeries implements pb.AdminServer.
|
||||
func (s *AdminDisabled) DeleteSeries(_ context.Context, r *pb.SeriesDeleteRequest) (*pb.SeriesDeleteResponse, error) {
|
||||
return nil, errAdminDisabled
|
||||
}
|
||||
|
||||
// TSDBAdmin defines the tsdb interfaces used by the v1 API for admin operations as well as statistics.
|
||||
type TSDBAdmin interface {
|
||||
CleanTombstones() error
|
||||
Delete(mint, maxt int64, ms ...*labels.Matcher) error
|
||||
Snapshot(dir string, withHead bool) error
|
||||
}
|
||||
|
||||
// Admin provides an administration interface to Prometheus.
|
||||
type Admin struct {
|
||||
db TSDBAdmin
|
||||
dbDir string
|
||||
}
|
||||
|
||||
// NewAdmin returns an Admin server.
|
||||
func NewAdmin(db TSDBAdmin, dbDir string) *Admin {
|
||||
return &Admin{
|
||||
db: db,
|
||||
}
|
||||
}
|
||||
|
||||
// TSDBSnapshot implements pb.AdminServer.
|
||||
func (s *Admin) TSDBSnapshot(_ context.Context, req *pb.TSDBSnapshotRequest) (*pb.TSDBSnapshotResponse, error) {
|
||||
var (
|
||||
snapdir = filepath.Join(s.dbDir, "snapshots")
|
||||
name = fmt.Sprintf("%s-%x",
|
||||
time.Now().UTC().Format("20060102T150405Z0700"),
|
||||
rand.Int())
|
||||
dir = filepath.Join(snapdir, name)
|
||||
)
|
||||
if err := os.MkdirAll(dir, 0777); err != nil {
|
||||
return nil, status.Errorf(codes.Internal, "created snapshot directory: %s", err)
|
||||
}
|
||||
if err := s.db.Snapshot(dir, !req.SkipHead); err != nil {
|
||||
if errors.Cause(err) == tsdb.ErrNotReady {
|
||||
return nil, errTSDBNotReady
|
||||
}
|
||||
|
||||
return nil, status.Errorf(codes.Internal, "create snapshot: %s", err)
|
||||
}
|
||||
return &pb.TSDBSnapshotResponse{Name: name}, nil
|
||||
}
|
||||
|
||||
// TSDBCleanTombstones implements pb.AdminServer.
|
||||
func (s *Admin) TSDBCleanTombstones(_ context.Context, _ *pb.TSDBCleanTombstonesRequest) (*pb.TSDBCleanTombstonesResponse, error) {
|
||||
if err := s.db.CleanTombstones(); err != nil {
|
||||
if errors.Cause(err) == tsdb.ErrNotReady {
|
||||
return nil, errTSDBNotReady
|
||||
}
|
||||
return nil, status.Errorf(codes.Internal, "clean tombstones: %s", err)
|
||||
}
|
||||
|
||||
return &pb.TSDBCleanTombstonesResponse{}, nil
|
||||
}
|
||||
|
||||
// DeleteSeries implements pb.AdminServer.
|
||||
func (s *Admin) DeleteSeries(_ context.Context, r *pb.SeriesDeleteRequest) (*pb.SeriesDeleteResponse, error) {
|
||||
mint, maxt, err := extractTimeRange(r.MinTime, r.MaxTime)
|
||||
if err != nil {
|
||||
return nil, status.Error(codes.InvalidArgument, err.Error())
|
||||
}
|
||||
var matchers []*labels.Matcher
|
||||
|
||||
for _, m := range r.Matchers {
|
||||
var lm *labels.Matcher
|
||||
var err error
|
||||
|
||||
switch m.Type {
|
||||
case pb.LabelMatcher_EQ:
|
||||
lm, err = labels.NewMatcher(labels.MatchEqual, m.Name, m.Value)
|
||||
case pb.LabelMatcher_NEQ:
|
||||
lm, err = labels.NewMatcher(labels.MatchNotEqual, m.Name, m.Value)
|
||||
case pb.LabelMatcher_RE:
|
||||
lm, err = labels.NewMatcher(labels.MatchRegexp, m.Name, m.Value)
|
||||
case pb.LabelMatcher_NRE:
|
||||
lm, err = labels.NewMatcher(labels.MatchNotRegexp, m.Name, m.Value)
|
||||
default:
|
||||
return nil, status.Error(codes.InvalidArgument, "unknown matcher type")
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return nil, status.Errorf(codes.InvalidArgument, "bad matcher: %s", err)
|
||||
}
|
||||
|
||||
matchers = append(matchers, lm)
|
||||
}
|
||||
if err := s.db.Delete(timestamp.FromTime(mint), timestamp.FromTime(maxt), matchers...); err != nil {
|
||||
if errors.Cause(err) == tsdb.ErrNotReady {
|
||||
return nil, errTSDBNotReady
|
||||
}
|
||||
return nil, status.Error(codes.Internal, err.Error())
|
||||
}
|
||||
return &pb.SeriesDeleteResponse{}, nil
|
||||
}
|
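With the gRPC/REST v2 admin service deleted above, the TSDB admin operations are reached only through the v1 HTTP endpoints that the test changes below switch to. A hedged sketch of calling them from Go; the endpoint paths and expected status codes are taken from those tests, while the listen address and the `--web.enable-admin-api` flag are assumptions about a locally running server:

package main

import (
	"fmt"
	"net/http"
	"strings"
)

func main() {
	base := "http://localhost:9090/api/v1/admin/tsdb" // assumes --web.enable-admin-api is set

	// Snapshot the TSDB; the JSON response names the snapshot directory.
	resp, err := http.Post(base+"/snapshot", "", strings.NewReader(""))
	if err != nil {
		panic(err)
	}
	resp.Body.Close()
	fmt.Println("snapshot:", resp.Status) // expect 200 OK

	// Delete series matching a selector; success is 204 No Content.
	resp, err = http.Post(base+"/delete_series?match[]=up", "", nil)
	if err != nil {
		panic(err)
	}
	resp.Body.Close()
	fmt.Println("delete_series:", resp.Status) // expect 204 No Content
}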
web/web.go: 65 lines changed
|
@ -50,10 +50,8 @@ import (
|
|||
"github.com/prometheus/common/model"
|
||||
"github.com/prometheus/common/route"
|
||||
"github.com/prometheus/common/server"
|
||||
"github.com/soheilhy/cmux"
|
||||
"go.uber.org/atomic"
|
||||
"golang.org/x/net/netutil"
|
||||
"google.golang.org/grpc"
|
||||
|
||||
"github.com/prometheus/prometheus/config"
|
||||
"github.com/prometheus/prometheus/notifier"
|
||||
|
@ -66,7 +64,6 @@ import (
|
|||
"github.com/prometheus/prometheus/tsdb/index"
|
||||
"github.com/prometheus/prometheus/util/httputil"
|
||||
api_v1 "github.com/prometheus/prometheus/web/api/v1"
|
||||
api_v2 "github.com/prometheus/prometheus/web/api/v2"
|
||||
"github.com/prometheus/prometheus/web/ui"
|
||||
)
|
||||
|
||||
|
@ -503,11 +500,6 @@ func (h *Handler) testReady(f http.HandlerFunc) http.HandlerFunc {
|
|||
}
|
||||
}
|
||||
|
||||
// Checks if server is ready, calls f if it is, returns 503 if it is not.
|
||||
func (h *Handler) testReadyHandler(f http.Handler) http.HandlerFunc {
|
||||
return h.testReady(f.ServeHTTP)
|
||||
}
|
||||
|
||||
// Quit returns the receive-only quit channel.
|
||||
func (h *Handler) Quit() <-chan struct{} {
|
||||
return h.quitCh
|
||||
|
@ -533,31 +525,6 @@ func (h *Handler) Run(ctx context.Context) error {
|
|||
conntrack.TrackWithName("http"),
|
||||
conntrack.TrackWithTracing())
|
||||
|
||||
var (
|
||||
m = cmux.New(listener)
|
||||
// See https://github.com/grpc/grpc-go/issues/2636 for why we need to use MatchWithWriters().
|
||||
grpcl = m.MatchWithWriters(cmux.HTTP2MatchHeaderFieldSendSettings("content-type", "application/grpc"))
|
||||
httpl = m.Match(cmux.HTTP1Fast())
|
||||
grpcSrv = grpc.NewServer()
|
||||
)
|
||||
|
||||
// Prevent open connections to block the shutdown of the handler.
|
||||
m.SetReadTimeout(h.options.ReadTimeout)
|
||||
|
||||
av2 := api_v2.New(
|
||||
h.options.LocalStorage,
|
||||
h.options.TSDBDir,
|
||||
h.options.EnableAdminAPI,
|
||||
)
|
||||
av2.RegisterGRPC(grpcSrv)
|
||||
|
||||
hh, err := av2.HTTPHandler(ctx, h.options.ListenAddress)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
hhFunc := h.testReadyHandler(hh)
|
||||
|
||||
operationName := nethttp.OperationNameFunc(func(r *http.Request) string {
|
||||
return fmt.Sprintf("%s %s", r.Method, r.URL.Path)
|
||||
})
|
||||
|
@ -576,13 +543,6 @@ func (h *Handler) Run(ctx context.Context) error {
|
|||
|
||||
mux.Handle(apiPath+"/v1/", http.StripPrefix(apiPath+"/v1", av1))
|
||||
|
||||
mux.Handle(apiPath+"/", http.StripPrefix(apiPath,
|
||||
http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
httputil.SetCORS(w, h.options.CORSOrigin, r)
|
||||
hhFunc(w, r)
|
||||
}),
|
||||
))
|
||||
|
||||
errlog := stdlog.New(log.NewStdlibAdapter(level.Error(h.logger)), "", 0)
|
||||
|
||||
httpSrv := &http.Server{
|
||||
|
@ -593,13 +553,7 @@ func (h *Handler) Run(ctx context.Context) error {
|
|||
|
||||
errCh := make(chan error)
|
||||
go func() {
|
||||
errCh <- httpSrv.Serve(httpl)
|
||||
}()
|
||||
go func() {
|
||||
errCh <- grpcSrv.Serve(grpcl)
|
||||
}()
|
||||
go func() {
|
||||
errCh <- m.Serve()
|
||||
errCh <- httpSrv.Serve(listener)
|
||||
}()
|
||||
|
||||
select {
|
||||
|
@ -607,27 +561,10 @@ func (h *Handler) Run(ctx context.Context) error {
|
|||
return e
|
||||
case <-ctx.Done():
|
||||
httpSrv.Shutdown(ctx)
|
||||
stopGRPCSrv(grpcSrv)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// stopGRPCSrv stops a given GRPC server. An attempt to stop the server
|
||||
// gracefully is made first. After 15s, the server is forced to stop.
|
||||
func stopGRPCSrv(srv *grpc.Server) {
|
||||
stop := make(chan struct{})
|
||||
go func() {
|
||||
srv.GracefulStop()
|
||||
close(stop)
|
||||
}()
|
||||
|
||||
select {
|
||||
case <-time.After(15 * time.Second):
|
||||
srv.Stop()
|
||||
case <-stop:
|
||||
}
|
||||
}
|
||||
|
||||
func (h *Handler) alerts(w http.ResponseWriter, r *http.Request) {
|
||||
|
||||
var groups []*rules.Group
|
||||
|
|
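The web.go hunks above drop cmux, the gRPC server, and the v2 gateway handler, so the handler now serves plain HTTP straight off its listener. A minimal, self-contained sketch of the resulting shape; the listener address and the trivial router are stand-ins, not the handler's real fields:

package main

import (
	"log"
	"net"
	"net/http"
)

func main() {
	// Stand-ins: in web.go the listener and router come from the Handler's options.
	listener, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		log.Fatal(err)
	}
	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("ok"))
	})

	// With cmux and grpcSrv gone, the HTTP server consumes the listener
	// directly, matching the new errCh <- httpSrv.Serve(listener) call above.
	httpSrv := &http.Server{Handler: mux}
	errCh := make(chan error)
	go func() { errCh <- httpSrv.Serve(listener) }()
	log.Fatal(<-errCh)
}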
|
@ -177,12 +177,12 @@ func TestReadyAndHealthy(t *testing.T) {
|
|||
cleanupTestResponse(t, resp)
|
||||
}
|
||||
|
||||
resp, err = http.Post("http://localhost:9090/api/v2/admin/tsdb/snapshot", "", strings.NewReader(""))
|
||||
resp, err = http.Post("http://localhost:9090/api/v1/admin/tsdb/snapshot", "", strings.NewReader(""))
|
||||
testutil.Ok(t, err)
|
||||
testutil.Equals(t, http.StatusServiceUnavailable, resp.StatusCode)
|
||||
cleanupTestResponse(t, resp)
|
||||
|
||||
resp, err = http.Post("http://localhost:9090/api/v2/admin/tsdb/delete_series", "", strings.NewReader("{}"))
|
||||
resp, err = http.Post("http://localhost:9090/api/v1/admin/tsdb/delete_series", "", strings.NewReader("{}"))
|
||||
testutil.Ok(t, err)
|
||||
testutil.Equals(t, http.StatusServiceUnavailable, resp.StatusCode)
|
||||
cleanupTestResponse(t, resp)
|
||||
|
@ -208,15 +208,15 @@ func TestReadyAndHealthy(t *testing.T) {
|
|||
cleanupTestResponse(t, resp)
|
||||
}
|
||||
|
||||
resp, err = http.Post("http://localhost:9090/api/v2/admin/tsdb/snapshot", "", strings.NewReader(""))
|
||||
resp, err = http.Post("http://localhost:9090/api/v1/admin/tsdb/snapshot", "", strings.NewReader(""))
|
||||
testutil.Ok(t, err)
|
||||
testutil.Equals(t, http.StatusOK, resp.StatusCode)
|
||||
cleanupSnapshot(t, resp)
|
||||
cleanupSnapshot(t, dbDir, resp)
|
||||
cleanupTestResponse(t, resp)
|
||||
|
||||
resp, err = http.Post("http://localhost:9090/api/v2/admin/tsdb/delete_series", "", strings.NewReader("{}"))
|
||||
resp, err = http.Post("http://localhost:9090/api/v1/admin/tsdb/delete_series?match[]=up", "", nil)
|
||||
testutil.Ok(t, err)
|
||||
testutil.Equals(t, http.StatusOK, resp.StatusCode)
|
||||
testutil.Equals(t, http.StatusNoContent, resp.StatusCode)
|
||||
cleanupTestResponse(t, resp)
|
||||
}
|
||||
|
||||
|
@ -280,12 +280,12 @@ func TestRoutePrefix(t *testing.T) {
|
|||
testutil.Equals(t, http.StatusServiceUnavailable, resp.StatusCode)
|
||||
cleanupTestResponse(t, resp)
|
||||
|
||||
resp, err = http.Post("http://localhost:9091"+opts.RoutePrefix+"/api/v2/admin/tsdb/snapshot", "", strings.NewReader(""))
|
||||
resp, err = http.Post("http://localhost:9091"+opts.RoutePrefix+"/api/v1/admin/tsdb/snapshot", "", strings.NewReader(""))
|
||||
testutil.Ok(t, err)
|
||||
testutil.Equals(t, http.StatusServiceUnavailable, resp.StatusCode)
|
||||
cleanupTestResponse(t, resp)
|
||||
|
||||
resp, err = http.Post("http://localhost:9091"+opts.RoutePrefix+"/api/v2/admin/tsdb/delete_series", "", strings.NewReader("{}"))
|
||||
resp, err = http.Post("http://localhost:9091"+opts.RoutePrefix+"/api/v1/admin/tsdb/delete_series", "", strings.NewReader("{}"))
|
||||
testutil.Ok(t, err)
|
||||
testutil.Equals(t, http.StatusServiceUnavailable, resp.StatusCode)
|
||||
cleanupTestResponse(t, resp)
|
||||
|
@ -308,15 +308,15 @@ func TestRoutePrefix(t *testing.T) {
|
|||
testutil.Equals(t, http.StatusOK, resp.StatusCode)
|
||||
cleanupTestResponse(t, resp)
|
||||
|
||||
resp, err = http.Post("http://localhost:9091"+opts.RoutePrefix+"/api/v2/admin/tsdb/snapshot", "", strings.NewReader(""))
|
||||
resp, err = http.Post("http://localhost:9091"+opts.RoutePrefix+"/api/v1/admin/tsdb/snapshot", "", strings.NewReader(""))
|
||||
testutil.Ok(t, err)
|
||||
testutil.Equals(t, http.StatusOK, resp.StatusCode)
|
||||
cleanupSnapshot(t, resp)
|
||||
cleanupSnapshot(t, dbDir, resp)
|
||||
cleanupTestResponse(t, resp)
|
||||
|
||||
resp, err = http.Post("http://localhost:9091"+opts.RoutePrefix+"/api/v2/admin/tsdb/delete_series", "", strings.NewReader("{}"))
|
||||
resp, err = http.Post("http://localhost:9091"+opts.RoutePrefix+"/api/v1/admin/tsdb/delete_series?match[]=up", "", nil)
|
||||
testutil.Ok(t, err)
|
||||
testutil.Equals(t, http.StatusOK, resp.StatusCode)
|
||||
testutil.Equals(t, http.StatusNoContent, resp.StatusCode)
|
||||
cleanupTestResponse(t, resp)
|
||||
}
|
||||
|
||||
|
@ -468,14 +468,16 @@ func cleanupTestResponse(t *testing.T, resp *http.Response) {
|
|||
testutil.Ok(t, resp.Body.Close())
|
||||
}
|
||||
|
||||
func cleanupSnapshot(t *testing.T, resp *http.Response) {
|
||||
func cleanupSnapshot(t *testing.T, dbDir string, resp *http.Response) {
|
||||
snapshot := &struct {
|
||||
Data struct {
|
||||
Name string `json:"name"`
|
||||
} `json:"data"`
|
||||
}{}
|
||||
b, err := ioutil.ReadAll(resp.Body)
|
||||
testutil.Ok(t, err)
|
||||
testutil.Ok(t, json.Unmarshal(b, snapshot))
|
||||
testutil.Assert(t, snapshot.Name != "", "snapshot directory not returned")
|
||||
testutil.Ok(t, os.Remove(filepath.Join("snapshots", snapshot.Name)))
|
||||
os.Remove("snapshots") // We do not check for err here to prevent existing setups to fail.
|
||||
testutil.Assert(t, snapshot.Data.Name != "", "snapshot directory not returned")
|
||||
testutil.Ok(t, os.Remove(filepath.Join(dbDir, "snapshots", snapshot.Data.Name)))
|
||||
testutil.Ok(t, os.Remove(filepath.Join(dbDir, "snapshots")))
|
||||
}
|
||||