mirror of
https://github.com/prometheus/prometheus.git
synced 2024-11-09 23:24:05 -08:00
update kubernetes deps to v1.13.0
Signed-off-by: tariqibrahim <tariq.ibrahim@microsoft.com>
This commit is contained in:
parent
08d6c83657
commit
412ca33226
|
@ -136,6 +136,22 @@ var (
|
|||
},
|
||||
[]string{"queue_name"},
|
||||
)
|
||||
clientGoWorkqueueUnfinishedWorkSecondsMetricVec = prometheus.NewGaugeVec(
|
||||
prometheus.GaugeOpts{
|
||||
Namespace: workqueueMetricsNamespace,
|
||||
Name: "unfinished_work_seconds",
|
||||
Help: "How long an item has remained unfinished in the work queue.",
|
||||
},
|
||||
[]string{"queue_name"},
|
||||
)
|
||||
clientGoWorkqueueLongestRunningProcessorMetricVec = prometheus.NewGaugeVec(
|
||||
prometheus.GaugeOpts{
|
||||
Namespace: workqueueMetricsNamespace,
|
||||
Name: "longest_running_processor_micro_seconds",
|
||||
Help: "The duration (in microseconds) of the longest running processor in the work queue.",
|
||||
},
|
||||
[]string{"queue_name"},
|
||||
)
|
||||
clientGoWorkqueueWorkDurationMetricVec = prometheus.NewSummaryVec(
|
||||
prometheus.SummaryOpts{
|
||||
Namespace: workqueueMetricsNamespace,
|
||||
|
@ -218,6 +234,8 @@ func (f *clientGoWorkqueueMetricsProvider) Register(registerer prometheus.Regist
|
|||
registerer.MustRegister(clientGoWorkqueueAddsMetricVec)
|
||||
registerer.MustRegister(clientGoWorkqueueLatencyMetricVec)
|
||||
registerer.MustRegister(clientGoWorkqueueWorkDurationMetricVec)
|
||||
registerer.MustRegister(clientGoWorkqueueUnfinishedWorkSecondsMetricVec)
|
||||
registerer.MustRegister(clientGoWorkqueueLongestRunningProcessorMetricVec)
|
||||
}
|
||||
|
||||
func (f *clientGoWorkqueueMetricsProvider) NewDepthMetric(name string) workqueue.GaugeMetric {
|
||||
|
@ -240,6 +258,12 @@ func (f *clientGoWorkqueueMetricsProvider) NewWorkDurationMetric(name string) wo
|
|||
metric.Observe(v / 1e6)
|
||||
})
|
||||
}
|
||||
func (f *clientGoWorkqueueMetricsProvider) NewUnfinishedWorkSecondsMetric(name string) workqueue.SettableGaugeMetric {
|
||||
return clientGoWorkqueueUnfinishedWorkSecondsMetricVec.WithLabelValues(name)
|
||||
}
|
||||
func (f *clientGoWorkqueueMetricsProvider) NewLongestRunningProcessorMicrosecondsMetric(name string) workqueue.SettableGaugeMetric {
|
||||
return clientGoWorkqueueLongestRunningProcessorMetricVec.WithLabelValues(name)
|
||||
}
|
||||
func (clientGoWorkqueueMetricsProvider) NewRetriesMetric(name string) workqueue.CounterMetric {
|
||||
// Retries are not used so the metric is omitted.
|
||||
return noopMetric{}
|
||||
|
|
11
go.mod
11
go.mod
|
@ -29,10 +29,10 @@ require (
|
|||
github.com/dustin/go-humanize v1.0.0 // indirect
|
||||
github.com/elastic/gosigar v0.9.0 // indirect
|
||||
github.com/elazarl/go-bindata-assetfs v1.0.0 // indirect
|
||||
github.com/evanphx/json-patch v4.1.0+incompatible // indirect
|
||||
github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a // indirect
|
||||
github.com/fsnotify/fsnotify v1.4.7 // indirect
|
||||
github.com/getsentry/raven-go v0.1.0 // indirect
|
||||
github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680 // indirect
|
||||
github.com/go-ini/ini v1.21.1 // indirect
|
||||
github.com/go-kit/kit v0.0.0-20170517165212-6964666de57c
|
||||
github.com/go-logfmt/logfmt v0.3.0 // indirect
|
||||
|
@ -40,7 +40,6 @@ require (
|
|||
github.com/go-sql-driver/mysql v1.4.0 // indirect
|
||||
github.com/go-stack/stack v1.5.4 // indirect
|
||||
github.com/gogo/protobuf v0.0.0-20171123125729-971cbfd2e72b
|
||||
github.com/golang/glog v0.0.0-20141105023935-44145f04b68c // indirect
|
||||
github.com/golang/groupcache v0.0.0-20180924190550-6f2cf27854a4 // indirect
|
||||
github.com/golang/protobuf v0.0.0-20180622174009-9eb2c01ac278 // indirect
|
||||
github.com/golang/snappy v0.0.0-20160529050041-d9eb7a3d35ec
|
||||
|
@ -142,10 +141,12 @@ require (
|
|||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
|
||||
gopkg.in/vmihailenco/msgpack.v2 v2.9.1 // indirect
|
||||
gopkg.in/yaml.v2 v2.2.0
|
||||
k8s.io/api v0.0.0-20181126151915-b503174bad59
|
||||
k8s.io/apimachinery v0.0.0-20181126123746-eddba98df674
|
||||
k8s.io/client-go v9.0.0+incompatible
|
||||
k8s.io/api v0.0.0-20181204000039-89a74a8d264d
|
||||
k8s.io/apimachinery v0.0.0-20181127025237-2b1284ed4c93
|
||||
k8s.io/client-go v2.0.0-alpha.0.0.20181121191925-a47917edff34+incompatible
|
||||
k8s.io/klog v0.1.0 // indirect
|
||||
k8s.io/kube-openapi v0.0.0-20180629012420-d83b052f768a // indirect
|
||||
labix.org/v2/mgo v0.0.0-20140701140051-000000000287 // indirect
|
||||
launchpad.net/gocheck v0.0.0-20140225173054-000000000087 // indirect
|
||||
sigs.k8s.io/yaml v1.1.0 // indirect
|
||||
)
|
||||
|
|
22
go.sum
22
go.sum
|
@ -54,14 +54,14 @@ github.com/elastic/gosigar v0.9.0 h1:ehdJWCzrtTHhYDmUAO6Zpu+uez4UB/dhH0oJSQ/o1Pk
|
|||
github.com/elastic/gosigar v0.9.0/go.mod h1:cdorVVzy1fhmEqmtgqkoE3bYtCfSCkVyjTyCIo22xvs=
|
||||
github.com/elazarl/go-bindata-assetfs v1.0.0 h1:G/bYguwHIzWq9ZoyUQqrjTmJbbYn3j3CKKpKinvZLFk=
|
||||
github.com/elazarl/go-bindata-assetfs v1.0.0/go.mod h1:v+YaWX3bdea5J/mo8dSETolEo7R71Vk1u8bnjau5yw4=
|
||||
github.com/evanphx/json-patch v4.1.0+incompatible h1:K1MDoo4AZ4wU0GIU/fPmtZg7VpzLjCxu+UwBD1FvwOc=
|
||||
github.com/evanphx/json-patch v4.1.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
|
||||
github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a h1:yDWHCSQ40h88yih2JAcL6Ls/kVkSE8GFACTGVnMPruw=
|
||||
github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a/go.mod h1:7Ga40egUymuWXxAe151lTNnCv97MddSOVsjpPPkityA=
|
||||
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
github.com/getsentry/raven-go v0.1.0 h1:lc5jnN9D+q3panDpihwShgaOVvP6esoMEKbID2yhLoQ=
|
||||
github.com/getsentry/raven-go v0.1.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ=
|
||||
github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680 h1:ZktWZesgun21uEDrwW7iEV1zPCGQldM2atlJZ3TdvVM=
|
||||
github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
|
||||
github.com/go-ini/ini v1.21.1 h1:+QXUYsI7Tfxc64oD6R5BxU/Aq+UwGkyjH4W/hMNG7bg=
|
||||
github.com/go-ini/ini v1.21.1/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
|
||||
github.com/go-kit/kit v0.0.0-20170517165212-6964666de57c h1:pPzrDifQte6dw+pW/QL0zMUTAG2p6xOV6MES6UHrAbo=
|
||||
|
@ -76,8 +76,6 @@ github.com/go-stack/stack v1.5.4 h1:ACUuwAbOuCKT3mK+Az9UrqaSheA8lDWOfm0+ZT62NHY=
|
|||
github.com/go-stack/stack v1.5.4/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
||||
github.com/gogo/protobuf v0.0.0-20171123125729-971cbfd2e72b h1:9pWlytbUHf1Oz1gJGJMNt325N5hzLeW/Mik1lQbjN9M=
|
||||
github.com/gogo/protobuf v0.0.0-20171123125729-971cbfd2e72b/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||
github.com/golang/glog v0.0.0-20141105023935-44145f04b68c h1:CbdkBQ1/PiAo0FYJhQGwASD8wrgNvTdf01g6+O9tNuA=
|
||||
github.com/golang/glog v0.0.0-20141105023935-44145f04b68c/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
github.com/golang/groupcache v0.0.0-20180924190550-6f2cf27854a4 h1:6UVLWz0fIIrv0UVj6t0A7cL48n8IyAdLVQqAYzEfsKI=
|
||||
github.com/golang/groupcache v0.0.0-20180924190550-6f2cf27854a4/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/protobuf v0.0.0-20180622174009-9eb2c01ac278 h1:K8gwDJfG+B1LjkmxrSw1L6P6dQ089va1xLQSz7JTNps=
|
||||
|
@ -291,15 +289,19 @@ gopkg.in/vmihailenco/msgpack.v2 v2.9.1 h1:kb0VV7NuIojvRfzwslQeP3yArBqJHW9tOl4t38
|
|||
gopkg.in/vmihailenco/msgpack.v2 v2.9.1/go.mod h1:/3Dn1Npt9+MYyLpYYXjInO/5jvMLamn+AEGwNEOatn8=
|
||||
gopkg.in/yaml.v2 v2.2.0 h1:ucE2Go3MGv/WipgucyA7X3+4pRLSbl5sd8WaEs60obQ=
|
||||
gopkg.in/yaml.v2 v2.2.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
k8s.io/api v0.0.0-20181126151915-b503174bad59 h1:uXjIvSvNtNUQjqpBznXm29/Ntx/6Aezf/wa0yAFryWE=
|
||||
k8s.io/api v0.0.0-20181126151915-b503174bad59/go.mod h1:iuAfoD4hCxJ8Onx9kaTIt30j7jUFS00AXQi6QMi99vA=
|
||||
k8s.io/apimachinery v0.0.0-20181126123746-eddba98df674 h1:S3ImTLK1F6igG0/5Tx8hf08XMRSwxhPfgtCLjs0Q8q4=
|
||||
k8s.io/apimachinery v0.0.0-20181126123746-eddba98df674/go.mod h1:ccL7Eh7zubPUSh9A3USN90/OzHNSVN6zxzde07TDCL0=
|
||||
k8s.io/client-go v9.0.0+incompatible h1:2kqW3X2xQ9SbFvWZjGEHBLlWc1LG9JIJNXWkuqwdZ3A=
|
||||
k8s.io/client-go v9.0.0+incompatible/go.mod h1:7vJpHMYJwNQCWgzmNV+VYUl1zCObLyodBc8nIyt8L5s=
|
||||
k8s.io/api v0.0.0-20181204000039-89a74a8d264d h1:HQoGWsWUe/FmRcX9BU440AAMnzBFEf+DBo4nbkQlNzs=
|
||||
k8s.io/api v0.0.0-20181204000039-89a74a8d264d/go.mod h1:iuAfoD4hCxJ8Onx9kaTIt30j7jUFS00AXQi6QMi99vA=
|
||||
k8s.io/apimachinery v0.0.0-20181127025237-2b1284ed4c93 h1:tT6oQBi0qwLbbZSfDkdIsb23EwaLY85hoAV4SpXfdao=
|
||||
k8s.io/apimachinery v0.0.0-20181127025237-2b1284ed4c93/go.mod h1:ccL7Eh7zubPUSh9A3USN90/OzHNSVN6zxzde07TDCL0=
|
||||
k8s.io/client-go v2.0.0-alpha.0.0.20181121191925-a47917edff34+incompatible h1:7JnS1I1KbtbearjSCrycUhHSob+KjG6HDWY1GhjkAIU=
|
||||
k8s.io/client-go v2.0.0-alpha.0.0.20181121191925-a47917edff34+incompatible/go.mod h1:7vJpHMYJwNQCWgzmNV+VYUl1zCObLyodBc8nIyt8L5s=
|
||||
k8s.io/klog v0.1.0 h1:I5HMfc/DtuVaGR1KPwUrTc476K8NCqNBldC7H4dYEzk=
|
||||
k8s.io/klog v0.1.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
|
||||
k8s.io/kube-openapi v0.0.0-20180629012420-d83b052f768a h1:tHgpQvrWaYfrnC8G4N0Oszw5HHCsZxKilDi2R7HuCSM=
|
||||
k8s.io/kube-openapi v0.0.0-20180629012420-d83b052f768a/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc=
|
||||
labix.org/v2/mgo v0.0.0-20140701140051-000000000287 h1:L0cnkNl4TfAXzvdrqsYEmxOHOCv2p5I3taaReO8BWFs=
|
||||
labix.org/v2/mgo v0.0.0-20140701140051-000000000287/go.mod h1:Lg7AYkt1uXJoR9oeSZ3W/8IXLdvOfIITgZnommstyz4=
|
||||
launchpad.net/gocheck v0.0.0-20140225173054-000000000087 h1:Izowp2XBH6Ya6rv+hqbceQyw/gSGoXfH/UPoTGduL54=
|
||||
launchpad.net/gocheck v0.0.0-20140225173054-000000000087/go.mod h1:hj7XX3B/0A+80Vse0e+BUHsHMTEhd0O4cpUHr/e/BUM=
|
||||
sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs=
|
||||
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
|
||||
|
|
16
vendor/github.com/evanphx/json-patch/.travis.yml
generated
vendored
Normal file
16
vendor/github.com/evanphx/json-patch/.travis.yml
generated
vendored
Normal file
|
@ -0,0 +1,16 @@
|
|||
language: go
|
||||
|
||||
go:
|
||||
- 1.8
|
||||
- 1.7
|
||||
|
||||
install:
|
||||
- if ! go get code.google.com/p/go.tools/cmd/cover; then go get golang.org/x/tools/cmd/cover; fi
|
||||
- go get github.com/jessevdk/go-flags
|
||||
|
||||
script:
|
||||
- go get
|
||||
- go test -cover ./...
|
||||
|
||||
notifications:
|
||||
email: false
|
25
vendor/github.com/evanphx/json-patch/LICENSE
generated
vendored
Normal file
25
vendor/github.com/evanphx/json-patch/LICENSE
generated
vendored
Normal file
|
@ -0,0 +1,25 @@
|
|||
Copyright (c) 2014, Evan Phoenix
|
||||
All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright notice, this
|
||||
list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above copyright notice
|
||||
this list of conditions and the following disclaimer in the documentation
|
||||
and/or other materials provided with the distribution.
|
||||
* Neither the name of the Evan Phoenix nor the names of its contributors
|
||||
may be used to endorse or promote products derived from this software
|
||||
without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
|
||||
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
292
vendor/github.com/evanphx/json-patch/README.md
generated
vendored
Normal file
292
vendor/github.com/evanphx/json-patch/README.md
generated
vendored
Normal file
|
@ -0,0 +1,292 @@
|
|||
# JSON-Patch
|
||||
`jsonpatch` is a library which provides functionallity for both applying
|
||||
[RFC6902 JSON patches](http://tools.ietf.org/html/rfc6902) against documents, as
|
||||
well as for calculating & applying [RFC7396 JSON merge patches](https://tools.ietf.org/html/rfc7396).
|
||||
|
||||
[![GoDoc](https://godoc.org/github.com/evanphx/json-patch?status.svg)](http://godoc.org/github.com/evanphx/json-patch)
|
||||
[![Build Status](https://travis-ci.org/evanphx/json-patch.svg?branch=master)](https://travis-ci.org/evanphx/json-patch)
|
||||
[![Report Card](https://goreportcard.com/badge/github.com/evanphx/json-patch)](https://goreportcard.com/report/github.com/evanphx/json-patch)
|
||||
|
||||
# Get It!
|
||||
|
||||
**Latest and greatest**:
|
||||
```bash
|
||||
go get -u github.com/evanphx/json-patch
|
||||
```
|
||||
|
||||
**Stable Versions**:
|
||||
* Version 4: `go get -u gopkg.in/evanphx/json-patch.v4`
|
||||
|
||||
(previous versions below `v3` are unavailable)
|
||||
|
||||
# Use It!
|
||||
* [Create and apply a merge patch](#create-and-apply-a-merge-patch)
|
||||
* [Create and apply a JSON Patch](#create-and-apply-a-json-patch)
|
||||
* [Comparing JSON documents](#comparing-json-documents)
|
||||
* [Combine merge patches](#combine-merge-patches)
|
||||
|
||||
|
||||
# Configuration
|
||||
|
||||
There is a single global configuration variable `jsonpatch.SupportNegativeIndices'. This
|
||||
defaults to `true` and enables the non-standard practice of allowing negative indices
|
||||
to mean indices starting at the end of an array. This functionality can be disabled
|
||||
by setting `jsonpatch.SupportNegativeIndices = false`.
|
||||
|
||||
## Create and apply a merge patch
|
||||
Given both an original JSON document and a modified JSON document, you can create
|
||||
a [Merge Patch](https://tools.ietf.org/html/rfc7396) document.
|
||||
|
||||
It can describe the changes needed to convert from the original to the
|
||||
modified JSON document.
|
||||
|
||||
Once you have a merge patch, you can apply it to other JSON documents using the
|
||||
`jsonpatch.MergePatch(document, patch)` function.
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
jsonpatch "github.com/evanphx/json-patch"
|
||||
)
|
||||
|
||||
func main() {
|
||||
// Let's create a merge patch from these two documents...
|
||||
original := []byte(`{"name": "John", "age": 24, "height": 3.21}`)
|
||||
target := []byte(`{"name": "Jane", "age": 24}`)
|
||||
|
||||
patch, err := jsonpatch.CreateMergePatch(original, target)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// Now lets apply the patch against a different JSON document...
|
||||
|
||||
alternative := []byte(`{"name": "Tina", "age": 28, "height": 3.75}`)
|
||||
modifiedAlternative, err := jsonpatch.MergePatch(alternative, patch)
|
||||
|
||||
fmt.Printf("patch document: %s\n", patch)
|
||||
fmt.Printf("updated alternative doc: %s\n", modifiedAlternative)
|
||||
}
|
||||
```
|
||||
|
||||
When ran, you get the following output:
|
||||
|
||||
```bash
|
||||
$ go run main.go
|
||||
patch document: {"height":null,"name":"Jane"}
|
||||
updated tina doc: {"age":28,"name":"Jane"}
|
||||
```
|
||||
|
||||
## Create and apply a JSON Patch
|
||||
You can create patch objects using `DecodePatch([]byte)`, which can then
|
||||
be applied against JSON documents.
|
||||
|
||||
The following is an example of creating a patch from two operations, and
|
||||
applying it against a JSON document.
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
jsonpatch "github.com/evanphx/json-patch"
|
||||
)
|
||||
|
||||
func main() {
|
||||
original := []byte(`{"name": "John", "age": 24, "height": 3.21}`)
|
||||
patchJSON := []byte(`[
|
||||
{"op": "replace", "path": "/name", "value": "Jane"},
|
||||
{"op": "remove", "path": "/height"}
|
||||
]`)
|
||||
|
||||
patch, err := jsonpatch.DecodePatch(patchJSON)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
modified, err := patch.Apply(original)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
fmt.Printf("Original document: %s\n", original)
|
||||
fmt.Printf("Modified document: %s\n", modified)
|
||||
}
|
||||
```
|
||||
|
||||
When ran, you get the following output:
|
||||
|
||||
```bash
|
||||
$ go run main.go
|
||||
Original document: {"name": "John", "age": 24, "height": 3.21}
|
||||
Modified document: {"age":24,"name":"Jane"}
|
||||
```
|
||||
|
||||
## Comparing JSON documents
|
||||
Due to potential whitespace and ordering differences, one cannot simply compare
|
||||
JSON strings or byte-arrays directly.
|
||||
|
||||
As such, you can instead use `jsonpatch.Equal(document1, document2)` to
|
||||
determine if two JSON documents are _structurally_ equal. This ignores
|
||||
whitespace differences, and key-value ordering.
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
jsonpatch "github.com/evanphx/json-patch"
|
||||
)
|
||||
|
||||
func main() {
|
||||
original := []byte(`{"name": "John", "age": 24, "height": 3.21}`)
|
||||
similar := []byte(`
|
||||
{
|
||||
"age": 24,
|
||||
"height": 3.21,
|
||||
"name": "John"
|
||||
}
|
||||
`)
|
||||
different := []byte(`{"name": "Jane", "age": 20, "height": 3.37}`)
|
||||
|
||||
if jsonpatch.Equal(original, similar) {
|
||||
fmt.Println(`"original" is structurally equal to "similar"`)
|
||||
}
|
||||
|
||||
if !jsonpatch.Equal(original, different) {
|
||||
fmt.Println(`"original" is _not_ structurally equal to "similar"`)
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
When ran, you get the following output:
|
||||
```bash
|
||||
$ go run main.go
|
||||
"original" is structurally equal to "similar"
|
||||
"original" is _not_ structurally equal to "similar"
|
||||
```
|
||||
|
||||
## Combine merge patches
|
||||
Given two JSON merge patch documents, it is possible to combine them into a
|
||||
single merge patch which can describe both set of changes.
|
||||
|
||||
The resulting merge patch can be used such that applying it results in a
|
||||
document structurally similar as merging each merge patch to the document
|
||||
in succession.
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
jsonpatch "github.com/evanphx/json-patch"
|
||||
)
|
||||
|
||||
func main() {
|
||||
original := []byte(`{"name": "John", "age": 24, "height": 3.21}`)
|
||||
|
||||
nameAndHeight := []byte(`{"height":null,"name":"Jane"}`)
|
||||
ageAndEyes := []byte(`{"age":4.23,"eyes":"blue"}`)
|
||||
|
||||
// Let's combine these merge patch documents...
|
||||
combinedPatch, err := jsonpatch.MergeMergePatches(nameAndHeight, ageAndEyes)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// Apply each patch individual against the original document
|
||||
withoutCombinedPatch, err := jsonpatch.MergePatch(original, nameAndHeight)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
withoutCombinedPatch, err = jsonpatch.MergePatch(withoutCombinedPatch, ageAndEyes)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// Apply the combined patch against the original document
|
||||
|
||||
withCombinedPatch, err := jsonpatch.MergePatch(original, combinedPatch)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// Do both result in the same thing? They should!
|
||||
if jsonpatch.Equal(withCombinedPatch, withoutCombinedPatch) {
|
||||
fmt.Println("Both JSON documents are structurally the same!")
|
||||
}
|
||||
|
||||
fmt.Printf("combined merge patch: %s", combinedPatch)
|
||||
}
|
||||
```
|
||||
|
||||
When ran, you get the following output:
|
||||
```bash
|
||||
$ go run main.go
|
||||
Both JSON documents are structurally the same!
|
||||
combined merge patch: {"age":4.23,"eyes":"blue","height":null,"name":"Jane"}
|
||||
```
|
||||
|
||||
# CLI for comparing JSON documents
|
||||
You can install the commandline program `json-patch`.
|
||||
|
||||
This program can take multiple JSON patch documents as arguments,
|
||||
and fed a JSON document from `stdin`. It will apply the patch(es) against
|
||||
the document and output the modified doc.
|
||||
|
||||
**patch.1.json**
|
||||
```json
|
||||
[
|
||||
{"op": "replace", "path": "/name", "value": "Jane"},
|
||||
{"op": "remove", "path": "/height"}
|
||||
]
|
||||
```
|
||||
|
||||
**patch.2.json**
|
||||
```json
|
||||
[
|
||||
{"op": "add", "path": "/address", "value": "123 Main St"},
|
||||
{"op": "replace", "path": "/age", "value": "21"}
|
||||
]
|
||||
```
|
||||
|
||||
**document.json**
|
||||
```json
|
||||
{
|
||||
"name": "John",
|
||||
"age": 24,
|
||||
"height": 3.21
|
||||
}
|
||||
```
|
||||
|
||||
You can then run:
|
||||
|
||||
```bash
|
||||
$ go install github.com/evanphx/json-patch/cmd/json-patch
|
||||
$ cat document.json | json-patch -p patch.1.json -p patch.2.json
|
||||
{"address":"123 Main St","age":"21","name":"Jane"}
|
||||
```
|
||||
|
||||
# Help It!
|
||||
Contributions are welcomed! Leave [an issue](https://github.com/evanphx/json-patch/issues)
|
||||
or [create a PR](https://github.com/evanphx/json-patch/compare).
|
||||
|
||||
|
||||
Before creating a pull request, we'd ask that you make sure tests are passing
|
||||
and that you have added new tests when applicable.
|
||||
|
||||
Contributors can run tests using:
|
||||
|
||||
```bash
|
||||
go test -cover ./...
|
||||
```
|
||||
|
||||
Builds for pull requests are tested automatically
|
||||
using [TravisCI](https://travis-ci.org/evanphx/json-patch).
|
383
vendor/github.com/evanphx/json-patch/merge.go
generated
vendored
Normal file
383
vendor/github.com/evanphx/json-patch/merge.go
generated
vendored
Normal file
|
@ -0,0 +1,383 @@
|
|||
package jsonpatch
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"reflect"
|
||||
)
|
||||
|
||||
func merge(cur, patch *lazyNode, mergeMerge bool) *lazyNode {
|
||||
curDoc, err := cur.intoDoc()
|
||||
|
||||
if err != nil {
|
||||
pruneNulls(patch)
|
||||
return patch
|
||||
}
|
||||
|
||||
patchDoc, err := patch.intoDoc()
|
||||
|
||||
if err != nil {
|
||||
return patch
|
||||
}
|
||||
|
||||
mergeDocs(curDoc, patchDoc, mergeMerge)
|
||||
|
||||
return cur
|
||||
}
|
||||
|
||||
func mergeDocs(doc, patch *partialDoc, mergeMerge bool) {
|
||||
for k, v := range *patch {
|
||||
if v == nil {
|
||||
if mergeMerge {
|
||||
(*doc)[k] = nil
|
||||
} else {
|
||||
delete(*doc, k)
|
||||
}
|
||||
} else {
|
||||
cur, ok := (*doc)[k]
|
||||
|
||||
if !ok || cur == nil {
|
||||
pruneNulls(v)
|
||||
(*doc)[k] = v
|
||||
} else {
|
||||
(*doc)[k] = merge(cur, v, mergeMerge)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func pruneNulls(n *lazyNode) {
|
||||
sub, err := n.intoDoc()
|
||||
|
||||
if err == nil {
|
||||
pruneDocNulls(sub)
|
||||
} else {
|
||||
ary, err := n.intoAry()
|
||||
|
||||
if err == nil {
|
||||
pruneAryNulls(ary)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func pruneDocNulls(doc *partialDoc) *partialDoc {
|
||||
for k, v := range *doc {
|
||||
if v == nil {
|
||||
delete(*doc, k)
|
||||
} else {
|
||||
pruneNulls(v)
|
||||
}
|
||||
}
|
||||
|
||||
return doc
|
||||
}
|
||||
|
||||
func pruneAryNulls(ary *partialArray) *partialArray {
|
||||
newAry := []*lazyNode{}
|
||||
|
||||
for _, v := range *ary {
|
||||
if v != nil {
|
||||
pruneNulls(v)
|
||||
newAry = append(newAry, v)
|
||||
}
|
||||
}
|
||||
|
||||
*ary = newAry
|
||||
|
||||
return ary
|
||||
}
|
||||
|
||||
var errBadJSONDoc = fmt.Errorf("Invalid JSON Document")
|
||||
var errBadJSONPatch = fmt.Errorf("Invalid JSON Patch")
|
||||
var errBadMergeTypes = fmt.Errorf("Mismatched JSON Documents")
|
||||
|
||||
// MergeMergePatches merges two merge patches together, such that
|
||||
// applying this resulting merged merge patch to a document yields the same
|
||||
// as merging each merge patch to the document in succession.
|
||||
func MergeMergePatches(patch1Data, patch2Data []byte) ([]byte, error) {
|
||||
return doMergePatch(patch1Data, patch2Data, true)
|
||||
}
|
||||
|
||||
// MergePatch merges the patchData into the docData.
|
||||
func MergePatch(docData, patchData []byte) ([]byte, error) {
|
||||
return doMergePatch(docData, patchData, false)
|
||||
}
|
||||
|
||||
func doMergePatch(docData, patchData []byte, mergeMerge bool) ([]byte, error) {
|
||||
doc := &partialDoc{}
|
||||
|
||||
docErr := json.Unmarshal(docData, doc)
|
||||
|
||||
patch := &partialDoc{}
|
||||
|
||||
patchErr := json.Unmarshal(patchData, patch)
|
||||
|
||||
if _, ok := docErr.(*json.SyntaxError); ok {
|
||||
return nil, errBadJSONDoc
|
||||
}
|
||||
|
||||
if _, ok := patchErr.(*json.SyntaxError); ok {
|
||||
return nil, errBadJSONPatch
|
||||
}
|
||||
|
||||
if docErr == nil && *doc == nil {
|
||||
return nil, errBadJSONDoc
|
||||
}
|
||||
|
||||
if patchErr == nil && *patch == nil {
|
||||
return nil, errBadJSONPatch
|
||||
}
|
||||
|
||||
if docErr != nil || patchErr != nil {
|
||||
// Not an error, just not a doc, so we turn straight into the patch
|
||||
if patchErr == nil {
|
||||
if mergeMerge {
|
||||
doc = patch
|
||||
} else {
|
||||
doc = pruneDocNulls(patch)
|
||||
}
|
||||
} else {
|
||||
patchAry := &partialArray{}
|
||||
patchErr = json.Unmarshal(patchData, patchAry)
|
||||
|
||||
if patchErr != nil {
|
||||
return nil, errBadJSONPatch
|
||||
}
|
||||
|
||||
pruneAryNulls(patchAry)
|
||||
|
||||
out, patchErr := json.Marshal(patchAry)
|
||||
|
||||
if patchErr != nil {
|
||||
return nil, errBadJSONPatch
|
||||
}
|
||||
|
||||
return out, nil
|
||||
}
|
||||
} else {
|
||||
mergeDocs(doc, patch, mergeMerge)
|
||||
}
|
||||
|
||||
return json.Marshal(doc)
|
||||
}
|
||||
|
||||
// resemblesJSONArray indicates whether the byte-slice "appears" to be
|
||||
// a JSON array or not.
|
||||
// False-positives are possible, as this function does not check the internal
|
||||
// structure of the array. It only checks that the outer syntax is present and
|
||||
// correct.
|
||||
func resemblesJSONArray(input []byte) bool {
|
||||
input = bytes.TrimSpace(input)
|
||||
|
||||
hasPrefix := bytes.HasPrefix(input, []byte("["))
|
||||
hasSuffix := bytes.HasSuffix(input, []byte("]"))
|
||||
|
||||
return hasPrefix && hasSuffix
|
||||
}
|
||||
|
||||
// CreateMergePatch will return a merge patch document capable of converting
|
||||
// the original document(s) to the modified document(s).
|
||||
// The parameters can be bytes of either two JSON Documents, or two arrays of
|
||||
// JSON documents.
|
||||
// The merge patch returned follows the specification defined at http://tools.ietf.org/html/draft-ietf-appsawg-json-merge-patch-07
|
||||
func CreateMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) {
|
||||
originalResemblesArray := resemblesJSONArray(originalJSON)
|
||||
modifiedResemblesArray := resemblesJSONArray(modifiedJSON)
|
||||
|
||||
// Do both byte-slices seem like JSON arrays?
|
||||
if originalResemblesArray && modifiedResemblesArray {
|
||||
return createArrayMergePatch(originalJSON, modifiedJSON)
|
||||
}
|
||||
|
||||
// Are both byte-slices are not arrays? Then they are likely JSON objects...
|
||||
if !originalResemblesArray && !modifiedResemblesArray {
|
||||
return createObjectMergePatch(originalJSON, modifiedJSON)
|
||||
}
|
||||
|
||||
// None of the above? Then return an error because of mismatched types.
|
||||
return nil, errBadMergeTypes
|
||||
}
|
||||
|
||||
// createObjectMergePatch will return a merge-patch document capable of
|
||||
// converting the original document to the modified document.
|
||||
func createObjectMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) {
|
||||
originalDoc := map[string]interface{}{}
|
||||
modifiedDoc := map[string]interface{}{}
|
||||
|
||||
err := json.Unmarshal(originalJSON, &originalDoc)
|
||||
if err != nil {
|
||||
return nil, errBadJSONDoc
|
||||
}
|
||||
|
||||
err = json.Unmarshal(modifiedJSON, &modifiedDoc)
|
||||
if err != nil {
|
||||
return nil, errBadJSONDoc
|
||||
}
|
||||
|
||||
dest, err := getDiff(originalDoc, modifiedDoc)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return json.Marshal(dest)
|
||||
}
|
||||
|
||||
// createArrayMergePatch will return an array of merge-patch documents capable
|
||||
// of converting the original document to the modified document for each
|
||||
// pair of JSON documents provided in the arrays.
|
||||
// Arrays of mismatched sizes will result in an error.
|
||||
func createArrayMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) {
|
||||
originalDocs := []json.RawMessage{}
|
||||
modifiedDocs := []json.RawMessage{}
|
||||
|
||||
err := json.Unmarshal(originalJSON, &originalDocs)
|
||||
if err != nil {
|
||||
return nil, errBadJSONDoc
|
||||
}
|
||||
|
||||
err = json.Unmarshal(modifiedJSON, &modifiedDocs)
|
||||
if err != nil {
|
||||
return nil, errBadJSONDoc
|
||||
}
|
||||
|
||||
total := len(originalDocs)
|
||||
if len(modifiedDocs) != total {
|
||||
return nil, errBadJSONDoc
|
||||
}
|
||||
|
||||
result := []json.RawMessage{}
|
||||
for i := 0; i < len(originalDocs); i++ {
|
||||
original := originalDocs[i]
|
||||
modified := modifiedDocs[i]
|
||||
|
||||
patch, err := createObjectMergePatch(original, modified)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
result = append(result, json.RawMessage(patch))
|
||||
}
|
||||
|
||||
return json.Marshal(result)
|
||||
}
|
||||
|
||||
// Returns true if the array matches (must be json types).
|
||||
// As is idiomatic for go, an empty array is not the same as a nil array.
|
||||
func matchesArray(a, b []interface{}) bool {
|
||||
if len(a) != len(b) {
|
||||
return false
|
||||
}
|
||||
if (a == nil && b != nil) || (a != nil && b == nil) {
|
||||
return false
|
||||
}
|
||||
for i := range a {
|
||||
if !matchesValue(a[i], b[i]) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Returns true if the values matches (must be json types)
|
||||
// The types of the values must match, otherwise it will always return false
|
||||
// If two map[string]interface{} are given, all elements must match.
|
||||
func matchesValue(av, bv interface{}) bool {
|
||||
if reflect.TypeOf(av) != reflect.TypeOf(bv) {
|
||||
return false
|
||||
}
|
||||
switch at := av.(type) {
|
||||
case string:
|
||||
bt := bv.(string)
|
||||
if bt == at {
|
||||
return true
|
||||
}
|
||||
case float64:
|
||||
bt := bv.(float64)
|
||||
if bt == at {
|
||||
return true
|
||||
}
|
||||
case bool:
|
||||
bt := bv.(bool)
|
||||
if bt == at {
|
||||
return true
|
||||
}
|
||||
case nil:
|
||||
// Both nil, fine.
|
||||
return true
|
||||
case map[string]interface{}:
|
||||
bt := bv.(map[string]interface{})
|
||||
for key := range at {
|
||||
if !matchesValue(at[key], bt[key]) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
for key := range bt {
|
||||
if !matchesValue(at[key], bt[key]) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
case []interface{}:
|
||||
bt := bv.([]interface{})
|
||||
return matchesArray(at, bt)
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// getDiff returns the (recursive) difference between a and b as a map[string]interface{}.
|
||||
func getDiff(a, b map[string]interface{}) (map[string]interface{}, error) {
|
||||
into := map[string]interface{}{}
|
||||
for key, bv := range b {
|
||||
av, ok := a[key]
|
||||
// value was added
|
||||
if !ok {
|
||||
into[key] = bv
|
||||
continue
|
||||
}
|
||||
// If types have changed, replace completely
|
||||
if reflect.TypeOf(av) != reflect.TypeOf(bv) {
|
||||
into[key] = bv
|
||||
continue
|
||||
}
|
||||
// Types are the same, compare values
|
||||
switch at := av.(type) {
|
||||
case map[string]interface{}:
|
||||
bt := bv.(map[string]interface{})
|
||||
dst := make(map[string]interface{}, len(bt))
|
||||
dst, err := getDiff(at, bt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(dst) > 0 {
|
||||
into[key] = dst
|
||||
}
|
||||
case string, float64, bool:
|
||||
if !matchesValue(av, bv) {
|
||||
into[key] = bv
|
||||
}
|
||||
case []interface{}:
|
||||
bt := bv.([]interface{})
|
||||
if !matchesArray(at, bt) {
|
||||
into[key] = bv
|
||||
}
|
||||
case nil:
|
||||
switch bv.(type) {
|
||||
case nil:
|
||||
// Both nil, fine.
|
||||
default:
|
||||
into[key] = bv
|
||||
}
|
||||
default:
|
||||
panic(fmt.Sprintf("Unknown type:%T in key %s", av, key))
|
||||
}
|
||||
}
|
||||
// Now add all deleted values as nil
|
||||
for key := range a {
|
||||
_, found := b[key]
|
||||
if !found {
|
||||
into[key] = nil
|
||||
}
|
||||
}
|
||||
return into, nil
|
||||
}
|
682
vendor/github.com/evanphx/json-patch/patch.go
generated
vendored
Normal file
682
vendor/github.com/evanphx/json-patch/patch.go
generated
vendored
Normal file
|
@ -0,0 +1,682 @@
|
|||
package jsonpatch
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const (
|
||||
eRaw = iota
|
||||
eDoc
|
||||
eAry
|
||||
)
|
||||
|
||||
var SupportNegativeIndices bool = true
|
||||
|
||||
type lazyNode struct {
|
||||
raw *json.RawMessage
|
||||
doc partialDoc
|
||||
ary partialArray
|
||||
which int
|
||||
}
|
||||
|
||||
type operation map[string]*json.RawMessage
|
||||
|
||||
// Patch is an ordered collection of operations.
|
||||
type Patch []operation
|
||||
|
||||
type partialDoc map[string]*lazyNode
|
||||
type partialArray []*lazyNode
|
||||
|
||||
type container interface {
|
||||
get(key string) (*lazyNode, error)
|
||||
set(key string, val *lazyNode) error
|
||||
add(key string, val *lazyNode) error
|
||||
remove(key string) error
|
||||
}
|
||||
|
||||
func newLazyNode(raw *json.RawMessage) *lazyNode {
|
||||
return &lazyNode{raw: raw, doc: nil, ary: nil, which: eRaw}
|
||||
}
|
||||
|
||||
func (n *lazyNode) MarshalJSON() ([]byte, error) {
|
||||
switch n.which {
|
||||
case eRaw:
|
||||
return json.Marshal(n.raw)
|
||||
case eDoc:
|
||||
return json.Marshal(n.doc)
|
||||
case eAry:
|
||||
return json.Marshal(n.ary)
|
||||
default:
|
||||
return nil, fmt.Errorf("Unknown type")
|
||||
}
|
||||
}
|
||||
|
||||
func (n *lazyNode) UnmarshalJSON(data []byte) error {
|
||||
dest := make(json.RawMessage, len(data))
|
||||
copy(dest, data)
|
||||
n.raw = &dest
|
||||
n.which = eRaw
|
||||
return nil
|
||||
}
|
||||
|
||||
func (n *lazyNode) intoDoc() (*partialDoc, error) {
|
||||
if n.which == eDoc {
|
||||
return &n.doc, nil
|
||||
}
|
||||
|
||||
if n.raw == nil {
|
||||
return nil, fmt.Errorf("Unable to unmarshal nil pointer as partial document")
|
||||
}
|
||||
|
||||
err := json.Unmarshal(*n.raw, &n.doc)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
n.which = eDoc
|
||||
return &n.doc, nil
|
||||
}
|
||||
|
||||
func (n *lazyNode) intoAry() (*partialArray, error) {
|
||||
if n.which == eAry {
|
||||
return &n.ary, nil
|
||||
}
|
||||
|
||||
if n.raw == nil {
|
||||
return nil, fmt.Errorf("Unable to unmarshal nil pointer as partial array")
|
||||
}
|
||||
|
||||
err := json.Unmarshal(*n.raw, &n.ary)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
n.which = eAry
|
||||
return &n.ary, nil
|
||||
}
|
||||
|
||||
func (n *lazyNode) compact() []byte {
|
||||
buf := &bytes.Buffer{}
|
||||
|
||||
if n.raw == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
err := json.Compact(buf, *n.raw)
|
||||
|
||||
if err != nil {
|
||||
return *n.raw
|
||||
}
|
||||
|
||||
return buf.Bytes()
|
||||
}
|
||||
|
||||
func (n *lazyNode) tryDoc() bool {
|
||||
if n.raw == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
err := json.Unmarshal(*n.raw, &n.doc)
|
||||
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
n.which = eDoc
|
||||
return true
|
||||
}
|
||||
|
||||
func (n *lazyNode) tryAry() bool {
|
||||
if n.raw == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
err := json.Unmarshal(*n.raw, &n.ary)
|
||||
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
n.which = eAry
|
||||
return true
|
||||
}
|
||||
|
||||
func (n *lazyNode) equal(o *lazyNode) bool {
|
||||
if n.which == eRaw {
|
||||
if !n.tryDoc() && !n.tryAry() {
|
||||
if o.which != eRaw {
|
||||
return false
|
||||
}
|
||||
|
||||
return bytes.Equal(n.compact(), o.compact())
|
||||
}
|
||||
}
|
||||
|
||||
if n.which == eDoc {
|
||||
if o.which == eRaw {
|
||||
if !o.tryDoc() {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
if o.which != eDoc {
|
||||
return false
|
||||
}
|
||||
|
||||
for k, v := range n.doc {
|
||||
ov, ok := o.doc[k]
|
||||
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
if v == nil && ov == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
if !v.equal(ov) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
if o.which != eAry && !o.tryAry() {
|
||||
return false
|
||||
}
|
||||
|
||||
if len(n.ary) != len(o.ary) {
|
||||
return false
|
||||
}
|
||||
|
||||
for idx, val := range n.ary {
|
||||
if !val.equal(o.ary[idx]) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
func (o operation) kind() string {
|
||||
if obj, ok := o["op"]; ok && obj != nil {
|
||||
var op string
|
||||
|
||||
err := json.Unmarshal(*obj, &op)
|
||||
|
||||
if err != nil {
|
||||
return "unknown"
|
||||
}
|
||||
|
||||
return op
|
||||
}
|
||||
|
||||
return "unknown"
|
||||
}
|
||||
|
||||
func (o operation) path() string {
|
||||
if obj, ok := o["path"]; ok && obj != nil {
|
||||
var op string
|
||||
|
||||
err := json.Unmarshal(*obj, &op)
|
||||
|
||||
if err != nil {
|
||||
return "unknown"
|
||||
}
|
||||
|
||||
return op
|
||||
}
|
||||
|
||||
return "unknown"
|
||||
}
|
||||
|
||||
func (o operation) from() string {
|
||||
if obj, ok := o["from"]; ok && obj != nil {
|
||||
var op string
|
||||
|
||||
err := json.Unmarshal(*obj, &op)
|
||||
|
||||
if err != nil {
|
||||
return "unknown"
|
||||
}
|
||||
|
||||
return op
|
||||
}
|
||||
|
||||
return "unknown"
|
||||
}
|
||||
|
||||
func (o operation) value() *lazyNode {
|
||||
if obj, ok := o["value"]; ok {
|
||||
return newLazyNode(obj)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func isArray(buf []byte) bool {
|
||||
Loop:
|
||||
for _, c := range buf {
|
||||
switch c {
|
||||
case ' ':
|
||||
case '\n':
|
||||
case '\t':
|
||||
continue
|
||||
case '[':
|
||||
return true
|
||||
default:
|
||||
break Loop
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func findObject(pd *container, path string) (container, string) {
|
||||
doc := *pd
|
||||
|
||||
split := strings.Split(path, "/")
|
||||
|
||||
if len(split) < 2 {
|
||||
return nil, ""
|
||||
}
|
||||
|
||||
parts := split[1 : len(split)-1]
|
||||
|
||||
key := split[len(split)-1]
|
||||
|
||||
var err error
|
||||
|
||||
for _, part := range parts {
|
||||
|
||||
next, ok := doc.get(decodePatchKey(part))
|
||||
|
||||
if next == nil || ok != nil {
|
||||
return nil, ""
|
||||
}
|
||||
|
||||
if isArray(*next.raw) {
|
||||
doc, err = next.intoAry()
|
||||
|
||||
if err != nil {
|
||||
return nil, ""
|
||||
}
|
||||
} else {
|
||||
doc, err = next.intoDoc()
|
||||
|
||||
if err != nil {
|
||||
return nil, ""
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return doc, decodePatchKey(key)
|
||||
}
|
||||
|
||||
func (d *partialDoc) set(key string, val *lazyNode) error {
|
||||
(*d)[key] = val
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *partialDoc) add(key string, val *lazyNode) error {
|
||||
(*d)[key] = val
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *partialDoc) get(key string) (*lazyNode, error) {
|
||||
return (*d)[key], nil
|
||||
}
|
||||
|
||||
func (d *partialDoc) remove(key string) error {
|
||||
_, ok := (*d)[key]
|
||||
if !ok {
|
||||
return fmt.Errorf("Unable to remove nonexistent key: %s", key)
|
||||
}
|
||||
|
||||
delete(*d, key)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *partialArray) set(key string, val *lazyNode) error {
|
||||
if key == "-" {
|
||||
*d = append(*d, val)
|
||||
return nil
|
||||
}
|
||||
|
||||
idx, err := strconv.Atoi(key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
sz := len(*d)
|
||||
if idx+1 > sz {
|
||||
sz = idx + 1
|
||||
}
|
||||
|
||||
ary := make([]*lazyNode, sz)
|
||||
|
||||
cur := *d
|
||||
|
||||
copy(ary, cur)
|
||||
|
||||
if idx >= len(ary) {
|
||||
return fmt.Errorf("Unable to access invalid index: %d", idx)
|
||||
}
|
||||
|
||||
ary[idx] = val
|
||||
|
||||
*d = ary
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *partialArray) add(key string, val *lazyNode) error {
|
||||
if key == "-" {
|
||||
*d = append(*d, val)
|
||||
return nil
|
||||
}
|
||||
|
||||
idx, err := strconv.Atoi(key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ary := make([]*lazyNode, len(*d)+1)
|
||||
|
||||
cur := *d
|
||||
|
||||
if idx >= len(ary) {
|
||||
return fmt.Errorf("Unable to access invalid index: %d", idx)
|
||||
}
|
||||
|
||||
if SupportNegativeIndices {
|
||||
if idx < -len(ary) {
|
||||
return fmt.Errorf("Unable to access invalid index: %d", idx)
|
||||
}
|
||||
|
||||
if idx < 0 {
|
||||
idx += len(ary)
|
||||
}
|
||||
}
|
||||
|
||||
copy(ary[0:idx], cur[0:idx])
|
||||
ary[idx] = val
|
||||
copy(ary[idx+1:], cur[idx:])
|
||||
|
||||
*d = ary
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *partialArray) get(key string) (*lazyNode, error) {
|
||||
idx, err := strconv.Atoi(key)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if idx >= len(*d) {
|
||||
return nil, fmt.Errorf("Unable to access invalid index: %d", idx)
|
||||
}
|
||||
|
||||
return (*d)[idx], nil
|
||||
}
|
||||
|
||||
func (d *partialArray) remove(key string) error {
|
||||
idx, err := strconv.Atoi(key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
cur := *d
|
||||
|
||||
if idx >= len(cur) {
|
||||
return fmt.Errorf("Unable to access invalid index: %d", idx)
|
||||
}
|
||||
|
||||
if SupportNegativeIndices {
|
||||
if idx < -len(cur) {
|
||||
return fmt.Errorf("Unable to access invalid index: %d", idx)
|
||||
}
|
||||
|
||||
if idx < 0 {
|
||||
idx += len(cur)
|
||||
}
|
||||
}
|
||||
|
||||
ary := make([]*lazyNode, len(cur)-1)
|
||||
|
||||
copy(ary[0:idx], cur[0:idx])
|
||||
copy(ary[idx:], cur[idx+1:])
|
||||
|
||||
*d = ary
|
||||
return nil
|
||||
|
||||
}
|
||||
|
||||
func (p Patch) add(doc *container, op operation) error {
|
||||
path := op.path()
|
||||
|
||||
con, key := findObject(doc, path)
|
||||
|
||||
if con == nil {
|
||||
return fmt.Errorf("jsonpatch add operation does not apply: doc is missing path: \"%s\"", path)
|
||||
}
|
||||
|
||||
return con.add(key, op.value())
|
||||
}
|
||||
|
||||
func (p Patch) remove(doc *container, op operation) error {
|
||||
path := op.path()
|
||||
|
||||
con, key := findObject(doc, path)
|
||||
|
||||
if con == nil {
|
||||
return fmt.Errorf("jsonpatch remove operation does not apply: doc is missing path: \"%s\"", path)
|
||||
}
|
||||
|
||||
return con.remove(key)
|
||||
}
|
||||
|
||||
func (p Patch) replace(doc *container, op operation) error {
|
||||
path := op.path()
|
||||
|
||||
con, key := findObject(doc, path)
|
||||
|
||||
if con == nil {
|
||||
return fmt.Errorf("jsonpatch replace operation does not apply: doc is missing path: %s", path)
|
||||
}
|
||||
|
||||
_, ok := con.get(key)
|
||||
if ok != nil {
|
||||
return fmt.Errorf("jsonpatch replace operation does not apply: doc is missing key: %s", path)
|
||||
}
|
||||
|
||||
return con.set(key, op.value())
|
||||
}
|
||||
|
||||
func (p Patch) move(doc *container, op operation) error {
|
||||
from := op.from()
|
||||
|
||||
con, key := findObject(doc, from)
|
||||
|
||||
if con == nil {
|
||||
return fmt.Errorf("jsonpatch move operation does not apply: doc is missing from path: %s", from)
|
||||
}
|
||||
|
||||
val, err := con.get(key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = con.remove(key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
path := op.path()
|
||||
|
||||
con, key = findObject(doc, path)
|
||||
|
||||
if con == nil {
|
||||
return fmt.Errorf("jsonpatch move operation does not apply: doc is missing destination path: %s", path)
|
||||
}
|
||||
|
||||
return con.set(key, val)
|
||||
}
|
||||
|
||||
func (p Patch) test(doc *container, op operation) error {
|
||||
path := op.path()
|
||||
|
||||
con, key := findObject(doc, path)
|
||||
|
||||
if con == nil {
|
||||
return fmt.Errorf("jsonpatch test operation does not apply: is missing path: %s", path)
|
||||
}
|
||||
|
||||
val, err := con.get(key)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if val == nil {
|
||||
if op.value().raw == nil {
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("Testing value %s failed", path)
|
||||
} else if op.value() == nil {
|
||||
return fmt.Errorf("Testing value %s failed", path)
|
||||
}
|
||||
|
||||
if val.equal(op.value()) {
|
||||
return nil
|
||||
}
|
||||
|
||||
return fmt.Errorf("Testing value %s failed", path)
|
||||
}
|
||||
|
||||
func (p Patch) copy(doc *container, op operation) error {
|
||||
from := op.from()
|
||||
|
||||
con, key := findObject(doc, from)
|
||||
|
||||
if con == nil {
|
||||
return fmt.Errorf("jsonpatch copy operation does not apply: doc is missing from path: %s", from)
|
||||
}
|
||||
|
||||
val, err := con.get(key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
path := op.path()
|
||||
|
||||
con, key = findObject(doc, path)
|
||||
|
||||
if con == nil {
|
||||
return fmt.Errorf("jsonpatch copy operation does not apply: doc is missing destination path: %s", path)
|
||||
}
|
||||
|
||||
return con.set(key, val)
|
||||
}
|
||||
|
||||
// Equal indicates if 2 JSON documents have the same structural equality.
|
||||
func Equal(a, b []byte) bool {
|
||||
ra := make(json.RawMessage, len(a))
|
||||
copy(ra, a)
|
||||
la := newLazyNode(&ra)
|
||||
|
||||
rb := make(json.RawMessage, len(b))
|
||||
copy(rb, b)
|
||||
lb := newLazyNode(&rb)
|
||||
|
||||
return la.equal(lb)
|
||||
}
|
||||
|
||||
// DecodePatch decodes the passed JSON document as an RFC 6902 patch.
|
||||
func DecodePatch(buf []byte) (Patch, error) {
|
||||
var p Patch
|
||||
|
||||
err := json.Unmarshal(buf, &p)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return p, nil
|
||||
}
|
||||
|
||||
// Apply mutates a JSON document according to the patch, and returns the new
|
||||
// document.
|
||||
func (p Patch) Apply(doc []byte) ([]byte, error) {
|
||||
return p.ApplyIndent(doc, "")
|
||||
}
|
||||
|
||||
// ApplyIndent mutates a JSON document according to the patch, and returns the new
|
||||
// document indented.
|
||||
func (p Patch) ApplyIndent(doc []byte, indent string) ([]byte, error) {
|
||||
var pd container
|
||||
if doc[0] == '[' {
|
||||
pd = &partialArray{}
|
||||
} else {
|
||||
pd = &partialDoc{}
|
||||
}
|
||||
|
||||
err := json.Unmarshal(doc, pd)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = nil
|
||||
|
||||
for _, op := range p {
|
||||
switch op.kind() {
|
||||
case "add":
|
||||
err = p.add(&pd, op)
|
||||
case "remove":
|
||||
err = p.remove(&pd, op)
|
||||
case "replace":
|
||||
err = p.replace(&pd, op)
|
||||
case "move":
|
||||
err = p.move(&pd, op)
|
||||
case "test":
|
||||
err = p.test(&pd, op)
|
||||
case "copy":
|
||||
err = p.copy(&pd, op)
|
||||
default:
|
||||
err = fmt.Errorf("Unexpected kind: %s", op.kind())
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
if indent != "" {
|
||||
return json.MarshalIndent(pd, "", indent)
|
||||
}
|
||||
|
||||
return json.Marshal(pd)
|
||||
}
|
||||
|
||||
// From http://tools.ietf.org/html/rfc6901#section-4 :
|
||||
//
|
||||
// Evaluation of each reference token begins by decoding any escaped
|
||||
// character sequence. This is performed by first transforming any
|
||||
// occurrence of the sequence '~1' to '/', and then transforming any
|
||||
// occurrence of the sequence '~0' to '~'.
|
||||
|
||||
var (
|
||||
rfc6901Decoder = strings.NewReplacer("~1", "/", "~0", "~")
|
||||
)
|
||||
|
||||
func decodePatchKey(k string) string {
|
||||
return rfc6901Decoder.Replace(k)
|
||||
}
|
7
vendor/github.com/ghodss/yaml/.travis.yml
generated
vendored
7
vendor/github.com/ghodss/yaml/.travis.yml
generated
vendored
|
@ -1,7 +0,0 @@
|
|||
language: go
|
||||
go:
|
||||
- 1.3
|
||||
- 1.4
|
||||
script:
|
||||
- go test
|
||||
- go build
|
2
vendor/k8s.io/api/admissionregistration/v1alpha1/doc.go
generated
vendored
2
vendor/k8s.io/api/admissionregistration/v1alpha1/doc.go
generated
vendored
|
@ -16,10 +16,10 @@ limitations under the License.
|
|||
|
||||
// +k8s:deepcopy-gen=package
|
||||
// +k8s:openapi-gen=true
|
||||
// +groupName=admissionregistration.k8s.io
|
||||
|
||||
// Package v1alpha1 is the v1alpha1 version of the API.
|
||||
// AdmissionConfiguration and AdmissionPluginConfiguration are legacy static admission plugin configuration
|
||||
// InitializerConfiguration and validatingWebhookConfiguration is for the
|
||||
// new dynamic admission controller configuration.
|
||||
// +groupName=admissionregistration.k8s.io
|
||||
package v1alpha1 // import "k8s.io/api/admissionregistration/v1alpha1"
|
||||
|
|
21
vendor/k8s.io/api/admissionregistration/v1alpha1/generated.pb.go
generated
vendored
21
vendor/k8s.io/api/admissionregistration/v1alpha1/generated.pb.go
generated
vendored
|
@ -14,9 +14,8 @@ See the License for the specific language governing permissions and
|
|||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Code generated by protoc-gen-gogo.
|
||||
// Code generated by protoc-gen-gogo. DO NOT EDIT.
|
||||
// source: k8s.io/kubernetes/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto
|
||||
// DO NOT EDIT!
|
||||
|
||||
/*
|
||||
Package v1alpha1 is a generated protocol buffer package.
|
||||
|
@ -251,24 +250,6 @@ func (m *Rule) MarshalTo(dAtA []byte) (int, error) {
|
|||
return i, nil
|
||||
}
|
||||
|
||||
func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int {
|
||||
dAtA[offset] = uint8(v)
|
||||
dAtA[offset+1] = uint8(v >> 8)
|
||||
dAtA[offset+2] = uint8(v >> 16)
|
||||
dAtA[offset+3] = uint8(v >> 24)
|
||||
dAtA[offset+4] = uint8(v >> 32)
|
||||
dAtA[offset+5] = uint8(v >> 40)
|
||||
dAtA[offset+6] = uint8(v >> 48)
|
||||
dAtA[offset+7] = uint8(v >> 56)
|
||||
return offset + 8
|
||||
}
|
||||
func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int {
|
||||
dAtA[offset] = uint8(v)
|
||||
dAtA[offset+1] = uint8(v >> 8)
|
||||
dAtA[offset+2] = uint8(v >> 16)
|
||||
dAtA[offset+3] = uint8(v >> 24)
|
||||
return offset + 4
|
||||
}
|
||||
func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
|
||||
for v >= 1<<7 {
|
||||
dAtA[offset] = uint8(v&0x7f | 0x80)
|
||||
|
|
6
vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto
generated
vendored
6
vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto
generated
vendored
|
@ -88,7 +88,7 @@ message Rule {
|
|||
repeated string apiVersions = 2;
|
||||
|
||||
// Resources is a list of resources this rule applies to.
|
||||
//
|
||||
//
|
||||
// For example:
|
||||
// 'pods' means pods.
|
||||
// 'pods/log' means the log subresource of pods.
|
||||
|
@ -96,10 +96,10 @@ message Rule {
|
|||
// 'pods/*' means all subresources of pods.
|
||||
// '*/scale' means all scale subresources.
|
||||
// '*/*' means all resources and their subresources.
|
||||
//
|
||||
//
|
||||
// If wildcard is present, the validation rule will ensure resources do not
|
||||
// overlap with each other.
|
||||
//
|
||||
//
|
||||
// Depending on the enclosing object, subresources might not be allowed.
|
||||
// Required.
|
||||
repeated string resources = 3;
|
||||
|
|
2
vendor/k8s.io/api/admissionregistration/v1beta1/doc.go
generated
vendored
2
vendor/k8s.io/api/admissionregistration/v1beta1/doc.go
generated
vendored
|
@ -16,10 +16,10 @@ limitations under the License.
|
|||
|
||||
// +k8s:deepcopy-gen=package
|
||||
// +k8s:openapi-gen=true
|
||||
// +groupName=admissionregistration.k8s.io
|
||||
|
||||
// Package v1beta1 is the v1beta1 version of the API.
|
||||
// AdmissionConfiguration and AdmissionPluginConfiguration are legacy static admission plugin configuration
|
||||
// InitializerConfiguration and validatingWebhookConfiguration is for the
|
||||
// new dynamic admission controller configuration.
|
||||
// +groupName=admissionregistration.k8s.io
|
||||
package v1beta1 // import "k8s.io/api/admissionregistration/v1beta1"
|
||||
|
|
21
vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go
generated
vendored
21
vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go
generated
vendored
|
@ -14,9 +14,8 @@ See the License for the specific language governing permissions and
|
|||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Code generated by protoc-gen-gogo.
|
||||
// Code generated by protoc-gen-gogo. DO NOT EDIT.
|
||||
// source: k8s.io/kubernetes/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto
|
||||
// DO NOT EDIT!
|
||||
|
||||
/*
|
||||
Package v1beta1 is a generated protocol buffer package.
|
||||
|
@ -506,24 +505,6 @@ func (m *WebhookClientConfig) MarshalTo(dAtA []byte) (int, error) {
|
|||
return i, nil
|
||||
}
|
||||
|
||||
func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int {
|
||||
dAtA[offset] = uint8(v)
|
||||
dAtA[offset+1] = uint8(v >> 8)
|
||||
dAtA[offset+2] = uint8(v >> 16)
|
||||
dAtA[offset+3] = uint8(v >> 24)
|
||||
dAtA[offset+4] = uint8(v >> 32)
|
||||
dAtA[offset+5] = uint8(v >> 40)
|
||||
dAtA[offset+6] = uint8(v >> 48)
|
||||
dAtA[offset+7] = uint8(v >> 56)
|
||||
return offset + 8
|
||||
}
|
||||
func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int {
|
||||
dAtA[offset] = uint8(v)
|
||||
dAtA[offset+1] = uint8(v >> 8)
|
||||
dAtA[offset+2] = uint8(v >> 16)
|
||||
dAtA[offset+3] = uint8(v >> 24)
|
||||
return offset + 4
|
||||
}
|
||||
func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
|
||||
for v >= 1<<7 {
|
||||
dAtA[offset] = uint8(v&0x7f | 0x80)
|
||||
|
|
40
vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto
generated
vendored
40
vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto
generated
vendored
@@ -66,7 +66,7 @@ message Rule {
repeated string apiVersions = 2;

// Resources is a list of resources this rule applies to.
//
//
// For example:
// 'pods' means pods.
// 'pods/log' means the log subresource of pods.

@@ -74,10 +74,10 @@ message Rule {
// 'pods/*' means all subresources of pods.
// '*/scale' means all scale subresources.
// '*/*' means all resources and their subresources.
//
//
// If wildcard is present, the validation rule will ensure resources do not
// overlap with each other.
//
//
// Depending on the enclosing object, subresources might not be allowed.
// Required.
repeated string resources = 3;

@@ -168,7 +168,7 @@ message Webhook {
// object itself is a namespace, the matching is performed on
// object.metadata.labels. If the object is another cluster scoped resource,
// it never skips the webhook.
//
//
// For example, to run the webhook on any objects whose namespace is not
// associated with "runlevel" of "0" or "1"; you will set the selector as
// follows:

@@ -184,7 +184,7 @@ message Webhook {
// }
// ]
// }
//
//
// If instead you want to only run the webhook on any objects whose
// namespace is associated with the "environment" of "prod" or "staging";
// you will set the selector as follows:

@@ -200,11 +200,11 @@ message Webhook {
// }
// ]
// }
//
//
// See
// https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
// for more examples of label selectors.
//
//
// Default to the empty LabelSelector, which matches everything.
// +optional
optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 5;

@@ -223,47 +223,47 @@ message Webhook {
// connection with the webhook
message WebhookClientConfig {
// `url` gives the location of the webhook, in standard URL form
// (`[scheme://]host:port/path`). Exactly one of `url` or `service`
// (`scheme://host:port/path`). Exactly one of `url` or `service`
// must be specified.
//
//
// The `host` should not refer to a service running in the cluster; use
// the `service` field instead. The host might be resolved via external
// DNS in some apiservers (e.g., `kube-apiserver` cannot resolve
// in-cluster DNS as that would be a layering violation). `host` may
// also be an IP address.
//
//
// Please note that using `localhost` or `127.0.0.1` as a `host` is
// risky unless you take great care to run this webhook on all hosts
// which run an apiserver which might need to make calls to this
// webhook. Such installs are likely to be non-portable, i.e., not easy
// to turn up in a new cluster.
//
//
// The scheme must be "https"; the URL must begin with "https://".
//
//
// A path is optional, and if present may be any string permissible in
// a URL. You may use the path to pass an arbitrary string to the
// webhook, for example, a cluster identifier.
//
//
// Attempting to use a user or basic auth e.g. "user:password@" is not
// allowed. Fragments ("#...") and query parameters ("?...") are not
// allowed, either.
//
//
// +optional
optional string url = 3;

// `service` is a reference to the service for this webhook. Either
// `service` or `url` must be specified.
//
//
// If the webhook is running within the cluster, then you should use `service`.
//
//
// Port 443 will be used if it is open, otherwise it is an error.
//
//
// +optional
optional ServiceReference service = 1;

// `caBundle` is a PEM encoded CA bundle which will be used to validate
// the webhook's server certificate.
// Required.
// `caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate.
// If unspecified, system trust roots on the apiserver are used.
// +optional
optional bytes caBundle = 2;
}
12  vendor/k8s.io/api/admissionregistration/v1beta1/types.go  generated vendored
@@ -246,7 +246,7 @@ const (
// connection with the webhook
type WebhookClientConfig struct {
// `url` gives the location of the webhook, in standard URL form
// (`[scheme://]host:port/path`). Exactly one of `url` or `service`
// (`scheme://host:port/path`). Exactly one of `url` or `service`
// must be specified.
//
// The `host` should not refer to a service running in the cluster; use

@@ -282,12 +282,12 @@ type WebhookClientConfig struct {
// Port 443 will be used if it is open, otherwise it is an error.
//
// +optional
Service *ServiceReference `json:"service" protobuf:"bytes,1,opt,name=service"`
Service *ServiceReference `json:"service,omitempty" protobuf:"bytes,1,opt,name=service"`

// `caBundle` is a PEM encoded CA bundle which will be used to validate
// the webhook's server certificate.
// Required.
CABundle []byte `json:"caBundle" protobuf:"bytes,2,opt,name=caBundle"`
// `caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate.
// If unspecified, system trust roots on the apiserver are used.
// +optional
CABundle []byte `json:"caBundle,omitempty" protobuf:"bytes,2,opt,name=caBundle"`
}

// ServiceReference holds a reference to Service.legacy.k8s.io
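To illustrate the field semantics spelled out in the comments above (exactly one of `url` or `service`, and a now-optional `caBundle`), here is a minimal Go sketch. The endpoint, namespace and service name are illustrative assumptions, not part of this diff:

```go
package main

import (
	"fmt"

	admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"
)

func main() {
	// Out-of-cluster webhook: set `url` only. The scheme must be https.
	url := "https://webhook.example.com:8443/validate" // hypothetical endpoint
	external := admissionregistrationv1beta1.WebhookClientConfig{
		URL: &url,
		// CABundle may now be omitted; the apiserver then falls back to
		// its system trust roots.
	}

	// In-cluster webhook: set `service` instead of `url`.
	path := "/validate"
	inCluster := admissionregistrationv1beta1.WebhookClientConfig{
		Service: &admissionregistrationv1beta1.ServiceReference{
			Namespace: "default",    // hypothetical namespace
			Name:      "my-webhook", // hypothetical service name
			Path:      &path,
		},
	}

	fmt.Println(external.URL != nil, inCluster.Service != nil)
}
```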
4  vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go  generated vendored
|
@ -114,9 +114,9 @@ func (Webhook) SwaggerDoc() map[string]string {
|
|||
|
||||
var map_WebhookClientConfig = map[string]string{
|
||||
"": "WebhookClientConfig contains the information to make a TLS connection with the webhook",
|
||||
"url": "`url` gives the location of the webhook, in standard URL form (`[scheme://]host:port/path`). Exactly one of `url` or `service` must be specified.\n\nThe `host` should not refer to a service running in the cluster; use the `service` field instead. The host might be resolved via external DNS in some apiservers (e.g., `kube-apiserver` cannot resolve in-cluster DNS as that would be a layering violation). `host` may also be an IP address.\n\nPlease note that using `localhost` or `127.0.0.1` as a `host` is risky unless you take great care to run this webhook on all hosts which run an apiserver which might need to make calls to this webhook. Such installs are likely to be non-portable, i.e., not easy to turn up in a new cluster.\n\nThe scheme must be \"https\"; the URL must begin with \"https://\".\n\nA path is optional, and if present may be any string permissible in a URL. You may use the path to pass an arbitrary string to the webhook, for example, a cluster identifier.\n\nAttempting to use a user or basic auth e.g. \"user:password@\" is not allowed. Fragments (\"#...\") and query parameters (\"?...\") are not allowed, either.",
|
||||
"url": "`url` gives the location of the webhook, in standard URL form (`scheme://host:port/path`). Exactly one of `url` or `service` must be specified.\n\nThe `host` should not refer to a service running in the cluster; use the `service` field instead. The host might be resolved via external DNS in some apiservers (e.g., `kube-apiserver` cannot resolve in-cluster DNS as that would be a layering violation). `host` may also be an IP address.\n\nPlease note that using `localhost` or `127.0.0.1` as a `host` is risky unless you take great care to run this webhook on all hosts which run an apiserver which might need to make calls to this webhook. Such installs are likely to be non-portable, i.e., not easy to turn up in a new cluster.\n\nThe scheme must be \"https\"; the URL must begin with \"https://\".\n\nA path is optional, and if present may be any string permissible in a URL. You may use the path to pass an arbitrary string to the webhook, for example, a cluster identifier.\n\nAttempting to use a user or basic auth e.g. \"user:password@\" is not allowed. Fragments (\"#...\") and query parameters (\"?...\") are not allowed, either.",
|
||||
"service": "`service` is a reference to the service for this webhook. Either `service` or `url` must be specified.\n\nIf the webhook is running within the cluster, then you should use `service`.\n\nPort 443 will be used if it is open, otherwise it is an error.",
|
||||
"caBundle": "`caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. Required.",
|
||||
"caBundle": "`caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. If unspecified, system trust roots on the apiserver are used.",
|
||||
}
|
||||
|
||||
func (WebhookClientConfig) SwaggerDoc() map[string]string {
|
||||
|
|
21  vendor/k8s.io/api/apps/v1/generated.pb.go  generated vendored
|
@ -14,9 +14,8 @@ See the License for the specific language governing permissions and
|
|||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Code generated by protoc-gen-gogo.
|
||||
// Code generated by protoc-gen-gogo. DO NOT EDIT.
|
||||
// source: k8s.io/kubernetes/vendor/k8s.io/api/apps/v1/generated.proto
|
||||
// DO NOT EDIT!
|
||||
|
||||
/*
|
||||
Package v1 is a generated protocol buffer package.
|
||||
|
@ -1440,24 +1439,6 @@ func (m *StatefulSetUpdateStrategy) MarshalTo(dAtA []byte) (int, error) {
|
|||
return i, nil
|
||||
}
|
||||
|
||||
func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int {
|
||||
dAtA[offset] = uint8(v)
|
||||
dAtA[offset+1] = uint8(v >> 8)
|
||||
dAtA[offset+2] = uint8(v >> 16)
|
||||
dAtA[offset+3] = uint8(v >> 24)
|
||||
dAtA[offset+4] = uint8(v >> 32)
|
||||
dAtA[offset+5] = uint8(v >> 40)
|
||||
dAtA[offset+6] = uint8(v >> 48)
|
||||
dAtA[offset+7] = uint8(v >> 56)
|
||||
return offset + 8
|
||||
}
|
||||
func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int {
|
||||
dAtA[offset] = uint8(v)
|
||||
dAtA[offset+1] = uint8(v >> 8)
|
||||
dAtA[offset+2] = uint8(v >> 16)
|
||||
dAtA[offset+3] = uint8(v >> 24)
|
||||
return offset + 4
|
||||
}
|
||||
func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
|
||||
for v >= 1<<7 {
|
||||
dAtA[offset] = uint8(v&0x7f | 0x80)
|
||||
|
|
1  vendor/k8s.io/api/apps/v1/generated.proto  generated vendored
@@ -280,6 +280,7 @@ message DeploymentSpec {

// The deployment strategy to use to replace existing pods with new ones.
// +optional
// +patchStrategy=retainKeys
optional DeploymentStrategy strategy = 4;

// Minimum number of seconds for which a newly created pod should be ready
9  vendor/k8s.io/api/apps/v1/types.go  generated vendored
@@ -32,6 +32,8 @@ const (
)

// +genclient
// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/api/autoscaling/v1.Scale
// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/api/autoscaling/v1.Scale,result=k8s.io/api/autoscaling/v1.Scale
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// StatefulSet represents a set of pods with consistent identities.

@@ -244,6 +246,8 @@ type StatefulSetList struct {
}

// +genclient
// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/api/autoscaling/v1.Scale
// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/api/autoscaling/v1.Scale,result=k8s.io/api/autoscaling/v1.Scale
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// Deployment enables declarative updates for Pods and ReplicaSets.

@@ -279,7 +283,8 @@ type DeploymentSpec struct {

// The deployment strategy to use to replace existing pods with new ones.
// +optional
Strategy DeploymentStrategy `json:"strategy,omitempty" protobuf:"bytes,4,opt,name=strategy"`
// +patchStrategy=retainKeys
Strategy DeploymentStrategy `json:"strategy,omitempty" patchStrategy:"retainKeys" protobuf:"bytes,4,opt,name=strategy"`

// Minimum number of seconds for which a newly created pod should be ready
// without any of its container crashing, for it to be considered available.

@@ -653,6 +658,8 @@ type DaemonSetList struct {
}

// +genclient
// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/api/autoscaling/v1.Scale
// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/api/autoscaling/v1.Scale,result=k8s.io/api/autoscaling/v1.Scale
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// ReplicaSet ensures that a specified number of pod replicas are running at any given time.
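The `patchStrategy:"retainKeys"` tag added to `DeploymentSpec.Strategy` (here and in the v1beta1/v1beta2 variants below) affects strategic-merge patches of the strategy union. A minimal Go sketch of the field being populated; the import aliases and values are illustrative, and the retainKeys behaviour described in the comment is my reading of the tag, not stated in this diff:

```go
package main

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	maxUnavailable := intstr.FromInt(1)
	maxSurge := intstr.FromString("25%")

	// Strategy is a union-style field: RollingUpdate only makes sense when
	// Type is RollingUpdate. With patchStrategy=retainKeys, a strategic-merge
	// patch that switches Type (e.g. to Recreate) is expected to clear the
	// keys it does not mention, such as rollingUpdate.
	spec := appsv1.DeploymentSpec{
		Strategy: appsv1.DeploymentStrategy{
			Type: appsv1.RollingUpdateDeploymentStrategyType,
			RollingUpdate: &appsv1.RollingUpdateDeployment{
				MaxUnavailable: &maxUnavailable,
				MaxSurge:       &maxSurge,
			},
		},
	}
	fmt.Println(spec.Strategy.Type)
}
```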
2  vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go  generated vendored
|
@ -96,7 +96,7 @@ func (DaemonSetSpec) SwaggerDoc() map[string]string {
|
|||
}
|
||||
|
||||
var map_DaemonSetStatus = map[string]string{
|
||||
"": "DaemonSetStatus represents the current status of a daemon set.",
|
||||
"": "DaemonSetStatus represents the current status of a daemon set.",
|
||||
"currentNumberScheduled": "The number of nodes that are running at least 1 daemon pod and are supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/",
|
||||
"numberMisscheduled": "The number of nodes that are running the daemon pod, but are not supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/",
|
||||
"desiredNumberScheduled": "The total number of nodes that should be running the daemon pod (including nodes correctly running the daemon pod). More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/",
|
||||
|
|
293  vendor/k8s.io/api/apps/v1beta1/generated.pb.go  generated vendored
|
@ -14,9 +14,8 @@ See the License for the specific language governing permissions and
|
|||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Code generated by protoc-gen-gogo.
|
||||
// Code generated by protoc-gen-gogo. DO NOT EDIT.
|
||||
// source: k8s.io/kubernetes/vendor/k8s.io/api/apps/v1beta1/generated.proto
|
||||
// DO NOT EDIT!
|
||||
|
||||
/*
|
||||
Package v1beta1 is a generated protocol buffer package.
|
||||
|
@ -1091,24 +1090,6 @@ func (m *StatefulSetUpdateStrategy) MarshalTo(dAtA []byte) (int, error) {
|
|||
return i, nil
|
||||
}
|
||||
|
||||
func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int {
|
||||
dAtA[offset] = uint8(v)
|
||||
dAtA[offset+1] = uint8(v >> 8)
|
||||
dAtA[offset+2] = uint8(v >> 16)
|
||||
dAtA[offset+3] = uint8(v >> 24)
|
||||
dAtA[offset+4] = uint8(v >> 32)
|
||||
dAtA[offset+5] = uint8(v >> 40)
|
||||
dAtA[offset+6] = uint8(v >> 48)
|
||||
dAtA[offset+7] = uint8(v >> 56)
|
||||
return offset + 8
|
||||
}
|
||||
func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int {
|
||||
dAtA[offset] = uint8(v)
|
||||
dAtA[offset+1] = uint8(v >> 8)
|
||||
dAtA[offset+2] = uint8(v >> 16)
|
||||
dAtA[offset+3] = uint8(v >> 24)
|
||||
return offset + 4
|
||||
}
|
||||
func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
|
||||
for v >= 1<<7 {
|
||||
dAtA[offset] = uint8(v&0x7f | 0x80)
|
||||
|
@ -2552,51 +2533,14 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error {
|
|||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
var keykey uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowGenerated
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
keykey |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
var stringLenmapkey uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowGenerated
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLenmapkey |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
intStringLenmapkey := int(stringLenmapkey)
|
||||
if intStringLenmapkey < 0 {
|
||||
return ErrInvalidLengthGenerated
|
||||
}
|
||||
postStringIndexmapkey := iNdEx + intStringLenmapkey
|
||||
if postStringIndexmapkey > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
mapkey := string(dAtA[iNdEx:postStringIndexmapkey])
|
||||
iNdEx = postStringIndexmapkey
|
||||
if m.UpdatedAnnotations == nil {
|
||||
m.UpdatedAnnotations = make(map[string]string)
|
||||
}
|
||||
if iNdEx < postIndex {
|
||||
var valuekey uint64
|
||||
var mapkey string
|
||||
var mapvalue string
|
||||
for iNdEx < postIndex {
|
||||
entryPreIndex := iNdEx
|
||||
var wire uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowGenerated
|
||||
|
@ -2606,41 +2550,80 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error {
|
|||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
valuekey |= (uint64(b) & 0x7F) << shift
|
||||
wire |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
var stringLenmapvalue uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowGenerated
|
||||
fieldNum := int32(wire >> 3)
|
||||
if fieldNum == 1 {
|
||||
var stringLenmapkey uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowGenerated
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLenmapkey |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if iNdEx >= l {
|
||||
intStringLenmapkey := int(stringLenmapkey)
|
||||
if intStringLenmapkey < 0 {
|
||||
return ErrInvalidLengthGenerated
|
||||
}
|
||||
postStringIndexmapkey := iNdEx + intStringLenmapkey
|
||||
if postStringIndexmapkey > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLenmapvalue |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
|
||||
iNdEx = postStringIndexmapkey
|
||||
} else if fieldNum == 2 {
|
||||
var stringLenmapvalue uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowGenerated
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLenmapvalue |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
intStringLenmapvalue := int(stringLenmapvalue)
|
||||
if intStringLenmapvalue < 0 {
|
||||
return ErrInvalidLengthGenerated
|
||||
}
|
||||
postStringIndexmapvalue := iNdEx + intStringLenmapvalue
|
||||
if postStringIndexmapvalue > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
|
||||
iNdEx = postStringIndexmapvalue
|
||||
} else {
|
||||
iNdEx = entryPreIndex
|
||||
skippy, err := skipGenerated(dAtA[iNdEx:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if skippy < 0 {
|
||||
return ErrInvalidLengthGenerated
|
||||
}
|
||||
if (iNdEx + skippy) > postIndex {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
iNdEx += skippy
|
||||
}
|
||||
intStringLenmapvalue := int(stringLenmapvalue)
|
||||
if intStringLenmapvalue < 0 {
|
||||
return ErrInvalidLengthGenerated
|
||||
}
|
||||
postStringIndexmapvalue := iNdEx + intStringLenmapvalue
|
||||
if postStringIndexmapvalue > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue])
|
||||
iNdEx = postStringIndexmapvalue
|
||||
m.UpdatedAnnotations[mapkey] = mapvalue
|
||||
} else {
|
||||
var mapvalue string
|
||||
m.UpdatedAnnotations[mapkey] = mapvalue
|
||||
}
|
||||
m.UpdatedAnnotations[mapkey] = mapvalue
|
||||
iNdEx = postIndex
|
||||
case 3:
|
||||
if wireType != 2 {
|
||||
|
@ -3833,51 +3816,14 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error {
|
|||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
var keykey uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowGenerated
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
keykey |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
var stringLenmapkey uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowGenerated
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLenmapkey |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
intStringLenmapkey := int(stringLenmapkey)
|
||||
if intStringLenmapkey < 0 {
|
||||
return ErrInvalidLengthGenerated
|
||||
}
|
||||
postStringIndexmapkey := iNdEx + intStringLenmapkey
|
||||
if postStringIndexmapkey > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
mapkey := string(dAtA[iNdEx:postStringIndexmapkey])
|
||||
iNdEx = postStringIndexmapkey
|
||||
if m.Selector == nil {
|
||||
m.Selector = make(map[string]string)
|
||||
}
|
||||
if iNdEx < postIndex {
|
||||
var valuekey uint64
|
||||
var mapkey string
|
||||
var mapvalue string
|
||||
for iNdEx < postIndex {
|
||||
entryPreIndex := iNdEx
|
||||
var wire uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowGenerated
|
||||
|
@ -3887,41 +3833,80 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error {
|
|||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
valuekey |= (uint64(b) & 0x7F) << shift
|
||||
wire |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
var stringLenmapvalue uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowGenerated
|
||||
fieldNum := int32(wire >> 3)
|
||||
if fieldNum == 1 {
|
||||
var stringLenmapkey uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowGenerated
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLenmapkey |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if iNdEx >= l {
|
||||
intStringLenmapkey := int(stringLenmapkey)
|
||||
if intStringLenmapkey < 0 {
|
||||
return ErrInvalidLengthGenerated
|
||||
}
|
||||
postStringIndexmapkey := iNdEx + intStringLenmapkey
|
||||
if postStringIndexmapkey > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLenmapvalue |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
|
||||
iNdEx = postStringIndexmapkey
|
||||
} else if fieldNum == 2 {
|
||||
var stringLenmapvalue uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowGenerated
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLenmapvalue |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
intStringLenmapvalue := int(stringLenmapvalue)
|
||||
if intStringLenmapvalue < 0 {
|
||||
return ErrInvalidLengthGenerated
|
||||
}
|
||||
postStringIndexmapvalue := iNdEx + intStringLenmapvalue
|
||||
if postStringIndexmapvalue > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
|
||||
iNdEx = postStringIndexmapvalue
|
||||
} else {
|
||||
iNdEx = entryPreIndex
|
||||
skippy, err := skipGenerated(dAtA[iNdEx:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if skippy < 0 {
|
||||
return ErrInvalidLengthGenerated
|
||||
}
|
||||
if (iNdEx + skippy) > postIndex {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
iNdEx += skippy
|
||||
}
|
||||
intStringLenmapvalue := int(stringLenmapvalue)
|
||||
if intStringLenmapvalue < 0 {
|
||||
return ErrInvalidLengthGenerated
|
||||
}
|
||||
postStringIndexmapvalue := iNdEx + intStringLenmapvalue
|
||||
if postStringIndexmapvalue > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue])
|
||||
iNdEx = postStringIndexmapvalue
|
||||
m.Selector[mapkey] = mapvalue
|
||||
} else {
|
||||
var mapvalue string
|
||||
m.Selector[mapkey] = mapvalue
|
||||
}
|
||||
m.Selector[mapkey] = mapvalue
|
||||
iNdEx = postIndex
|
||||
case 3:
|
||||
if wireType != 2 {
|
||||
|
|
1  vendor/k8s.io/api/apps/v1beta1/generated.proto  generated vendored
@@ -143,6 +143,7 @@ message DeploymentSpec {

// The deployment strategy to use to replace existing pods with new ones.
// +optional
// +patchStrategy=retainKeys
optional DeploymentStrategy strategy = 4;

// Minimum number of seconds for which a newly created pod should be ready
5  vendor/k8s.io/api/apps/v1beta1/types.go  generated vendored
@@ -55,8 +55,6 @@ type ScaleStatus struct {
TargetSelector string `json:"targetSelector,omitempty" protobuf:"bytes,3,opt,name=targetSelector"`
}

// +genclient
// +genclient:noVerbs
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// Scale represents a scaling request for a resource.

@@ -323,7 +321,8 @@ type DeploymentSpec struct {

// The deployment strategy to use to replace existing pods with new ones.
// +optional
Strategy DeploymentStrategy `json:"strategy,omitempty" protobuf:"bytes,4,opt,name=strategy"`
// +patchStrategy=retainKeys
Strategy DeploymentStrategy `json:"strategy,omitempty" patchStrategy:"retainKeys" protobuf:"bytes,4,opt,name=strategy"`

// Minimum number of seconds for which a newly created pod should be ready
// without any of its container crashing, for it to be considered available.
157  vendor/k8s.io/api/apps/v1beta2/generated.pb.go  generated vendored
|
@ -14,9 +14,8 @@ See the License for the specific language governing permissions and
|
|||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Code generated by protoc-gen-gogo.
|
||||
// Code generated by protoc-gen-gogo. DO NOT EDIT.
|
||||
// source: k8s.io/kubernetes/vendor/k8s.io/api/apps/v1beta2/generated.proto
|
||||
// DO NOT EDIT!
|
||||
|
||||
/*
|
||||
Package v1beta2 is a generated protocol buffer package.
|
||||
|
@ -1570,24 +1569,6 @@ func (m *StatefulSetUpdateStrategy) MarshalTo(dAtA []byte) (int, error) {
|
|||
return i, nil
|
||||
}
|
||||
|
||||
func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int {
|
||||
dAtA[offset] = uint8(v)
|
||||
dAtA[offset+1] = uint8(v >> 8)
|
||||
dAtA[offset+2] = uint8(v >> 16)
|
||||
dAtA[offset+3] = uint8(v >> 24)
|
||||
dAtA[offset+4] = uint8(v >> 32)
|
||||
dAtA[offset+5] = uint8(v >> 40)
|
||||
dAtA[offset+6] = uint8(v >> 48)
|
||||
dAtA[offset+7] = uint8(v >> 56)
|
||||
return offset + 8
|
||||
}
|
||||
func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int {
|
||||
dAtA[offset] = uint8(v)
|
||||
dAtA[offset+1] = uint8(v >> 8)
|
||||
dAtA[offset+2] = uint8(v >> 16)
|
||||
dAtA[offset+3] = uint8(v >> 24)
|
||||
return offset + 4
|
||||
}
|
||||
func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
|
||||
for v >= 1<<7 {
|
||||
dAtA[offset] = uint8(v&0x7f | 0x80)
|
||||
|
@ -6109,51 +6090,14 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error {
|
|||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
var keykey uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowGenerated
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
keykey |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
var stringLenmapkey uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowGenerated
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLenmapkey |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
intStringLenmapkey := int(stringLenmapkey)
|
||||
if intStringLenmapkey < 0 {
|
||||
return ErrInvalidLengthGenerated
|
||||
}
|
||||
postStringIndexmapkey := iNdEx + intStringLenmapkey
|
||||
if postStringIndexmapkey > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
mapkey := string(dAtA[iNdEx:postStringIndexmapkey])
|
||||
iNdEx = postStringIndexmapkey
|
||||
if m.Selector == nil {
|
||||
m.Selector = make(map[string]string)
|
||||
}
|
||||
if iNdEx < postIndex {
|
||||
var valuekey uint64
|
||||
var mapkey string
|
||||
var mapvalue string
|
||||
for iNdEx < postIndex {
|
||||
entryPreIndex := iNdEx
|
||||
var wire uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowGenerated
|
||||
|
@ -6163,41 +6107,80 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error {
|
|||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
valuekey |= (uint64(b) & 0x7F) << shift
|
||||
wire |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
var stringLenmapvalue uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowGenerated
|
||||
fieldNum := int32(wire >> 3)
|
||||
if fieldNum == 1 {
|
||||
var stringLenmapkey uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowGenerated
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLenmapkey |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if iNdEx >= l {
|
||||
intStringLenmapkey := int(stringLenmapkey)
|
||||
if intStringLenmapkey < 0 {
|
||||
return ErrInvalidLengthGenerated
|
||||
}
|
||||
postStringIndexmapkey := iNdEx + intStringLenmapkey
|
||||
if postStringIndexmapkey > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLenmapvalue |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
|
||||
iNdEx = postStringIndexmapkey
|
||||
} else if fieldNum == 2 {
|
||||
var stringLenmapvalue uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowGenerated
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLenmapvalue |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
intStringLenmapvalue := int(stringLenmapvalue)
|
||||
if intStringLenmapvalue < 0 {
|
||||
return ErrInvalidLengthGenerated
|
||||
}
|
||||
postStringIndexmapvalue := iNdEx + intStringLenmapvalue
|
||||
if postStringIndexmapvalue > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
|
||||
iNdEx = postStringIndexmapvalue
|
||||
} else {
|
||||
iNdEx = entryPreIndex
|
||||
skippy, err := skipGenerated(dAtA[iNdEx:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if skippy < 0 {
|
||||
return ErrInvalidLengthGenerated
|
||||
}
|
||||
if (iNdEx + skippy) > postIndex {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
iNdEx += skippy
|
||||
}
|
||||
intStringLenmapvalue := int(stringLenmapvalue)
|
||||
if intStringLenmapvalue < 0 {
|
||||
return ErrInvalidLengthGenerated
|
||||
}
|
||||
postStringIndexmapvalue := iNdEx + intStringLenmapvalue
|
||||
if postStringIndexmapvalue > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue])
|
||||
iNdEx = postStringIndexmapvalue
|
||||
m.Selector[mapkey] = mapvalue
|
||||
} else {
|
||||
var mapvalue string
|
||||
m.Selector[mapkey] = mapvalue
|
||||
}
|
||||
m.Selector[mapkey] = mapvalue
|
||||
iNdEx = postIndex
|
||||
case 3:
|
||||
if wireType != 2 {
|
||||
|
|
1  vendor/k8s.io/api/apps/v1beta2/generated.proto  generated vendored
@@ -286,6 +286,7 @@ message DeploymentSpec {

// The deployment strategy to use to replace existing pods with new ones.
// +optional
// +patchStrategy=retainKeys
optional DeploymentStrategy strategy = 4;

// Minimum number of seconds for which a newly created pod should be ready
5  vendor/k8s.io/api/apps/v1beta2/types.go  generated vendored
@@ -57,8 +57,6 @@ type ScaleStatus struct {
TargetSelector string `json:"targetSelector,omitempty" protobuf:"bytes,3,opt,name=targetSelector"`
}

// +genclient
// +genclient:noVerbs
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// Scale represents a scaling request for a resource.

@@ -331,7 +329,8 @@ type DeploymentSpec struct {

// The deployment strategy to use to replace existing pods with new ones.
// +optional
Strategy DeploymentStrategy `json:"strategy,omitempty" protobuf:"bytes,4,opt,name=strategy"`
// +patchStrategy=retainKeys
Strategy DeploymentStrategy `json:"strategy,omitempty" patchStrategy:"retainKeys" protobuf:"bytes,4,opt,name=strategy"`

// Minimum number of seconds for which a newly created pod should be ready
// without any of its container crashing, for it to be considered available.
2  vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go  generated vendored
|
@ -96,7 +96,7 @@ func (DaemonSetSpec) SwaggerDoc() map[string]string {
|
|||
}
|
||||
|
||||
var map_DaemonSetStatus = map[string]string{
|
||||
"": "DaemonSetStatus represents the current status of a daemon set.",
|
||||
"": "DaemonSetStatus represents the current status of a daemon set.",
|
||||
"currentNumberScheduled": "The number of nodes that are running at least 1 daemon pod and are supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/",
|
||||
"numberMisscheduled": "The number of nodes that are running the daemon pod, but are not supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/",
|
||||
"desiredNumberScheduled": "The total number of nodes that should be running the daemon pod (including nodes correctly running the daemon pod). More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/",
|
||||
|
|
22  vendor/k8s.io/api/auditregistration/v1alpha1/doc.go  generated vendored  Normal file
@@ -0,0 +1,22 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// +k8s:deepcopy-gen=package
// +k8s:openapi-gen=true

// +groupName=auditregistration.k8s.io

package v1alpha1 // import "k8s.io/api/auditregistration/v1alpha1"
1685  vendor/k8s.io/api/auditregistration/v1alpha1/generated.pb.go  generated vendored  Normal file
File diff suppressed because it is too large
158  vendor/k8s.io/api/auditregistration/v1alpha1/generated.proto  generated vendored  Normal file
|
@ -0,0 +1,158 @@
|
|||
/*
|
||||
Copyright The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
|
||||
// This file was autogenerated by go-to-protobuf. Do not edit it manually!
|
||||
|
||||
syntax = 'proto2';
|
||||
|
||||
package k8s.io.api.auditregistration.v1alpha1;
|
||||
|
||||
import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
|
||||
import "k8s.io/apimachinery/pkg/runtime/generated.proto";
|
||||
import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
|
||||
|
||||
// Package-wide variables from generator "generated".
|
||||
option go_package = "v1alpha1";
|
||||
|
||||
// AuditSink represents a cluster level audit sink
|
||||
message AuditSink {
|
||||
// +optional
|
||||
optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
|
||||
|
||||
// Spec defines the audit configuration spec
|
||||
optional AuditSinkSpec spec = 2;
|
||||
}
|
||||
|
||||
// AuditSinkList is a list of AuditSink items.
|
||||
message AuditSinkList {
|
||||
// +optional
|
||||
optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
|
||||
|
||||
// List of audit configurations.
|
||||
repeated AuditSink items = 2;
|
||||
}
|
||||
|
||||
// AuditSinkSpec holds the spec for the audit sink
|
||||
message AuditSinkSpec {
|
||||
// Policy defines the policy for selecting which events should be sent to the webhook
|
||||
// required
|
||||
optional Policy policy = 1;
|
||||
|
||||
// Webhook to send events
|
||||
// required
|
||||
optional Webhook webhook = 2;
|
||||
}
|
||||
|
||||
// Policy defines the configuration of how audit events are logged
|
||||
message Policy {
|
||||
// The Level that all requests are recorded at.
|
||||
// available options: None, Metadata, Request, RequestResponse
|
||||
// required
|
||||
optional string level = 1;
|
||||
|
||||
// Stages is a list of stages for which events are created.
|
||||
// +optional
|
||||
repeated string stages = 2;
|
||||
}
|
||||
|
||||
// ServiceReference holds a reference to Service.legacy.k8s.io
|
||||
message ServiceReference {
|
||||
// `namespace` is the namespace of the service.
|
||||
// Required
|
||||
optional string namespace = 1;
|
||||
|
||||
// `name` is the name of the service.
|
||||
// Required
|
||||
optional string name = 2;
|
||||
|
||||
// `path` is an optional URL path which will be sent in any request to
|
||||
// this service.
|
||||
// +optional
|
||||
optional string path = 3;
|
||||
}
|
||||
|
||||
// Webhook holds the configuration of the webhook
|
||||
message Webhook {
|
||||
// Throttle holds the options for throttling the webhook
|
||||
// +optional
|
||||
optional WebhookThrottleConfig throttle = 1;
|
||||
|
||||
// ClientConfig holds the connection parameters for the webhook
|
||||
// required
|
||||
optional WebhookClientConfig clientConfig = 2;
|
||||
}
|
||||
|
||||
// WebhookClientConfig contains the information to make a connection with the webhook
|
||||
message WebhookClientConfig {
|
||||
// `url` gives the location of the webhook, in standard URL form
|
||||
// (`scheme://host:port/path`). Exactly one of `url` or `service`
|
||||
// must be specified.
|
||||
//
|
||||
// The `host` should not refer to a service running in the cluster; use
|
||||
// the `service` field instead. The host might be resolved via external
|
||||
// DNS in some apiservers (e.g., `kube-apiserver` cannot resolve
|
||||
// in-cluster DNS as that would be a layering violation). `host` may
|
||||
// also be an IP address.
|
||||
//
|
||||
// Please note that using `localhost` or `127.0.0.1` as a `host` is
|
||||
// risky unless you take great care to run this webhook on all hosts
|
||||
// which run an apiserver which might need to make calls to this
|
||||
// webhook. Such installs are likely to be non-portable, i.e., not easy
|
||||
// to turn up in a new cluster.
|
||||
//
|
||||
// The scheme must be "https"; the URL must begin with "https://".
|
||||
//
|
||||
// A path is optional, and if present may be any string permissible in
|
||||
// a URL. You may use the path to pass an arbitrary string to the
|
||||
// webhook, for example, a cluster identifier.
|
||||
//
|
||||
// Attempting to use a user or basic auth e.g. "user:password@" is not
|
||||
// allowed. Fragments ("#...") and query parameters ("?...") are not
|
||||
// allowed, either.
|
||||
//
|
||||
// +optional
|
||||
optional string url = 1;
|
||||
|
||||
// `service` is a reference to the service for this webhook. Either
|
||||
// `service` or `url` must be specified.
|
||||
//
|
||||
// If the webhook is running within the cluster, then you should use `service`.
|
||||
//
|
||||
// Port 443 will be used if it is open, otherwise it is an error.
|
||||
//
|
||||
// +optional
|
||||
optional ServiceReference service = 2;
|
||||
|
||||
// `caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate.
|
||||
// If unspecified, system trust roots on the apiserver are used.
|
||||
// +optional
|
||||
optional bytes caBundle = 3;
|
||||
}
|
||||
|
||||
// WebhookThrottleConfig holds the configuration for throttling events
|
||||
message WebhookThrottleConfig {
|
||||
// ThrottleQPS maximum number of batches per second
|
||||
// default 10 QPS
|
||||
// +optional
|
||||
optional int64 qps = 1;
|
||||
|
||||
// ThrottleBurst is the maximum number of events sent at the same moment
|
||||
// default 15 QPS
|
||||
// +optional
|
||||
optional int64 burst = 2;
|
||||
}
|
||||
|
56  vendor/k8s.io/api/auditregistration/v1alpha1/register.go  generated vendored  Normal file
|
@ -0,0 +1,56 @@
|
|||
/*
|
||||
Copyright 2018 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package v1alpha1
|
||||
|
||||
import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
)
|
||||
|
||||
// GroupName is the group name use in this package
|
||||
const GroupName = "auditregistration.k8s.io"
|
||||
|
||||
// SchemeGroupVersion is group version used to register these objects
|
||||
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"}
|
||||
|
||||
// Resource takes an unqualified resource and returns a Group qualified GroupResource
|
||||
func Resource(resource string) schema.GroupResource {
|
||||
return SchemeGroupVersion.WithResource(resource).GroupResource()
|
||||
}
|
||||
|
||||
var (
|
||||
SchemeBuilder runtime.SchemeBuilder
|
||||
localSchemeBuilder = &SchemeBuilder
|
||||
AddToScheme = localSchemeBuilder.AddToScheme
|
||||
)
|
||||
|
||||
func init() {
|
||||
// We only register manually written functions here. The registration of the
|
||||
// generated functions takes place in the generated files. The separation
|
||||
// makes the code compile even when the generated files are missing.
|
||||
localSchemeBuilder.Register(addKnownTypes)
|
||||
}
|
||||
|
||||
func addKnownTypes(scheme *runtime.Scheme) error {
|
||||
scheme.AddKnownTypes(SchemeGroupVersion,
|
||||
&AuditSink{},
|
||||
&AuditSinkList{},
|
||||
)
|
||||
metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
|
||||
return nil
|
||||
}
|
194  vendor/k8s.io/api/auditregistration/v1alpha1/types.go  generated vendored  Normal file
|
@ -0,0 +1,194 @@
|
|||
/*
|
||||
Copyright 2018 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// +k8s:openapi-gen=true
|
||||
|
||||
package v1alpha1
|
||||
|
||||
import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
// Level defines the amount of information logged during auditing
|
||||
type Level string
|
||||
|
||||
// Valid audit levels
|
||||
const (
|
||||
// LevelNone disables auditing
|
||||
LevelNone Level = "None"
|
||||
// LevelMetadata provides the basic level of auditing.
|
||||
LevelMetadata Level = "Metadata"
|
||||
// LevelRequest provides Metadata level of auditing, and additionally
|
||||
// logs the request object (does not apply for non-resource requests).
|
||||
LevelRequest Level = "Request"
|
||||
// LevelRequestResponse provides Request level of auditing, and additionally
|
||||
// logs the response object (does not apply for non-resource requests and watches).
|
||||
LevelRequestResponse Level = "RequestResponse"
|
||||
)
|
||||
|
||||
// Stage defines the stages in request handling during which audit events may be generated.
|
||||
type Stage string
|
||||
|
||||
// Valid audit stages.
|
||||
const (
|
||||
// The stage for events generated after the audit handler receives the request, but before it
|
||||
// is delegated down the handler chain.
|
||||
StageRequestReceived = "RequestReceived"
|
||||
// The stage for events generated after the response headers are sent, but before the response body
|
||||
// is sent. This stage is only generated for long-running requests (e.g. watch).
|
||||
StageResponseStarted = "ResponseStarted"
|
||||
// The stage for events generated after the response body has been completed, and no more bytes
|
||||
// will be sent.
|
||||
StageResponseComplete = "ResponseComplete"
|
||||
// The stage for events generated when a panic occurred.
|
||||
StagePanic = "Panic"
|
||||
)
|
||||
|
||||
// +genclient
|
||||
// +genclient:nonNamespaced
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
||||
// AuditSink represents a cluster level audit sink
|
||||
type AuditSink struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
// +optional
|
||||
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
|
||||
|
||||
// Spec defines the audit configuration spec
|
||||
Spec AuditSinkSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
|
||||
}
|
||||
|
||||
// AuditSinkSpec holds the spec for the audit sink
|
||||
type AuditSinkSpec struct {
|
||||
// Policy defines the policy for selecting which events should be sent to the webhook
|
||||
// required
|
||||
Policy Policy `json:"policy" protobuf:"bytes,1,opt,name=policy"`
|
||||
|
||||
// Webhook to send events
|
||||
// required
|
||||
Webhook Webhook `json:"webhook" protobuf:"bytes,2,opt,name=webhook"`
|
||||
}
|
||||
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
||||
// AuditSinkList is a list of AuditSink items.
|
||||
type AuditSinkList struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
// +optional
|
||||
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
|
||||
|
||||
// List of audit configurations.
|
||||
Items []AuditSink `json:"items" protobuf:"bytes,2,rep,name=items"`
|
||||
}
|
||||
|
||||
// Policy defines the configuration of how audit events are logged
|
||||
type Policy struct {
|
||||
// The Level that all requests are recorded at.
|
||||
// available options: None, Metadata, Request, RequestResponse
|
||||
// required
|
||||
Level Level `json:"level" protobuf:"bytes,1,opt,name=level"`
|
||||
|
||||
// Stages is a list of stages for which events are created.
|
||||
// +optional
|
||||
Stages []Stage `json:"stages" protobuf:"bytes,2,opt,name=stages"`
|
||||
}
|
||||
|
||||
// Webhook holds the configuration of the webhook
|
||||
type Webhook struct {
|
||||
// Throttle holds the options for throttling the webhook
|
||||
// +optional
|
||||
Throttle *WebhookThrottleConfig `json:"throttle,omitempty" protobuf:"bytes,1,opt,name=throttle"`
|
||||
|
||||
// ClientConfig holds the connection parameters for the webhook
|
||||
// required
|
||||
ClientConfig WebhookClientConfig `json:"clientConfig" protobuf:"bytes,2,opt,name=clientConfig"`
|
||||
}
|
||||
|
||||
// WebhookThrottleConfig holds the configuration for throttling events
|
||||
type WebhookThrottleConfig struct {
|
||||
// ThrottleQPS maximum number of batches per second
|
||||
// default 10 QPS
|
||||
// +optional
|
||||
QPS *int64 `json:"qps,omitempty" protobuf:"bytes,1,opt,name=qps"`
|
||||
|
||||
// ThrottleBurst is the maximum number of events sent at the same moment
|
||||
// default 15 QPS
|
||||
// +optional
|
||||
Burst *int64 `json:"burst,omitempty" protobuf:"bytes,2,opt,name=burst"`
|
||||
}
|
||||
|
||||
// WebhookClientConfig contains the information to make a connection with the webhook
|
||||
type WebhookClientConfig struct {
|
||||
// `url` gives the location of the webhook, in standard URL form
|
||||
// (`scheme://host:port/path`). Exactly one of `url` or `service`
|
||||
// must be specified.
|
||||
//
|
||||
// The `host` should not refer to a service running in the cluster; use
|
||||
// the `service` field instead. The host might be resolved via external
|
||||
// DNS in some apiservers (e.g., `kube-apiserver` cannot resolve
|
||||
// in-cluster DNS as that would be a layering violation). `host` may
|
||||
// also be an IP address.
|
||||
//
|
||||
// Please note that using `localhost` or `127.0.0.1` as a `host` is
|
||||
// risky unless you take great care to run this webhook on all hosts
|
||||
// which run an apiserver which might need to make calls to this
|
||||
// webhook. Such installs are likely to be non-portable, i.e., not easy
|
||||
// to turn up in a new cluster.
|
||||
//
|
||||
// The scheme must be "https"; the URL must begin with "https://".
|
||||
//
|
||||
// A path is optional, and if present may be any string permissible in
|
||||
// a URL. You may use the path to pass an arbitrary string to the
|
||||
// webhook, for example, a cluster identifier.
|
||||
//
|
||||
// Attempting to use a user or basic auth e.g. "user:password@" is not
|
||||
// allowed. Fragments ("#...") and query parameters ("?...") are not
|
||||
// allowed, either.
|
||||
//
|
||||
// +optional
|
||||
URL *string `json:"url,omitempty" protobuf:"bytes,1,opt,name=url"`
|
||||
|
||||
// `service` is a reference to the service for this webhook. Either
|
||||
// `service` or `url` must be specified.
|
||||
//
|
||||
// If the webhook is running within the cluster, then you should use `service`.
|
||||
//
|
||||
// Port 443 will be used if it is open, otherwise it is an error.
|
||||
//
|
||||
// +optional
|
||||
Service *ServiceReference `json:"service,omitempty" protobuf:"bytes,2,opt,name=service"`
|
||||
|
||||
// `caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate.
|
||||
// If unspecified, system trust roots on the apiserver are used.
|
||||
// +optional
|
||||
CABundle []byte `json:"caBundle,omitempty" protobuf:"bytes,3,opt,name=caBundle"`
|
||||
}
|
||||
|
||||
// ServiceReference holds a reference to Service.legacy.k8s.io
|
||||
type ServiceReference struct {
|
||||
// `namespace` is the namespace of the service.
|
||||
// Required
|
||||
Namespace string `json:"namespace" protobuf:"bytes,1,opt,name=namespace"`
|
||||
|
||||
// `name` is the name of the service.
|
||||
// Required
|
||||
Name string `json:"name" protobuf:"bytes,2,opt,name=name"`
|
||||
|
||||
// `path` is an optional URL path which will be sent in any request to
|
||||
// this service.
|
||||
// +optional
|
||||
Path *string `json:"path,omitempty" protobuf:"bytes,3,opt,name=path"`
|
||||
}
|
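The new auditregistration/v1alpha1 types above wire a Policy and a Webhook into an AuditSink. A minimal Go sketch of how such an object might be assembled; the sink name, endpoint and throttle values are illustrative assumptions, not part of this diff:

```go
package main

import (
	"fmt"

	auditregv1alpha1 "k8s.io/api/auditregistration/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	qps := int64(10)   // default documented above is 10 QPS
	burst := int64(15) // default documented above is 15
	url := "https://audit.example.com/events" // hypothetical sink endpoint

	sink := auditregv1alpha1.AuditSink{
		ObjectMeta: metav1.ObjectMeta{Name: "example-sink"},
		Spec: auditregv1alpha1.AuditSinkSpec{
			Policy: auditregv1alpha1.Policy{
				// Level is required; Stages is optional.
				Level:  auditregv1alpha1.LevelMetadata,
				Stages: []auditregv1alpha1.Stage{auditregv1alpha1.StageResponseComplete},
			},
			Webhook: auditregv1alpha1.Webhook{
				Throttle: &auditregv1alpha1.WebhookThrottleConfig{QPS: &qps, Burst: &burst},
				ClientConfig: auditregv1alpha1.WebhookClientConfig{
					URL: &url, // exactly one of `url` or `service`
				},
			},
		},
	}
	fmt.Println(sink.Name, *sink.Spec.Webhook.Throttle.QPS)
}
```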
110  vendor/k8s.io/api/auditregistration/v1alpha1/types_swagger_doc_generated.go  generated vendored  Normal file
|
@ -0,0 +1,110 @@
|
|||
/*
|
||||
Copyright The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1alpha1

// This file contains a collection of methods that can be used from go-restful to
// generate Swagger API documentation for its models. Please read this PR for more
// information on the implementation: https://github.com/emicklei/go-restful/pull/215
//
// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
// they are on one line! For multiple line or blocks that you want to ignore use ---.
// Any context after a --- is ignored.
//
// Those methods can be generated by using hack/update-generated-swagger-docs.sh

// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
var map_AuditSink = map[string]string{
	"":     "AuditSink represents a cluster level audit sink",
	"spec": "Spec defines the audit configuration spec",
}

func (AuditSink) SwaggerDoc() map[string]string {
	return map_AuditSink
}

var map_AuditSinkList = map[string]string{
	"":      "AuditSinkList is a list of AuditSink items.",
	"items": "List of audit configurations.",
}

func (AuditSinkList) SwaggerDoc() map[string]string {
	return map_AuditSinkList
}

var map_AuditSinkSpec = map[string]string{
	"":        "AuditSinkSpec holds the spec for the audit sink",
	"policy":  "Policy defines the policy for selecting which events should be sent to the webhook required",
	"webhook": "Webhook to send events required",
}

func (AuditSinkSpec) SwaggerDoc() map[string]string {
	return map_AuditSinkSpec
}

var map_Policy = map[string]string{
	"":       "Policy defines the configuration of how audit events are logged",
	"level":  "The Level that all requests are recorded at. available options: None, Metadata, Request, RequestResponse required",
	"stages": "Stages is a list of stages for which events are created.",
}

func (Policy) SwaggerDoc() map[string]string {
	return map_Policy
}

var map_ServiceReference = map[string]string{
	"":          "ServiceReference holds a reference to Service.legacy.k8s.io",
	"namespace": "`namespace` is the namespace of the service. Required",
	"name":      "`name` is the name of the service. Required",
	"path":      "`path` is an optional URL path which will be sent in any request to this service.",
}

func (ServiceReference) SwaggerDoc() map[string]string {
	return map_ServiceReference
}

var map_Webhook = map[string]string{
	"":             "Webhook holds the configuration of the webhook",
	"throttle":     "Throttle holds the options for throttling the webhook",
	"clientConfig": "ClientConfig holds the connection parameters for the webhook required",
}

func (Webhook) SwaggerDoc() map[string]string {
	return map_Webhook
}

var map_WebhookClientConfig = map[string]string{
	"":         "WebhookClientConfig contains the information to make a connection with the webhook",
	"url":      "`url` gives the location of the webhook, in standard URL form (`scheme://host:port/path`). Exactly one of `url` or `service` must be specified.\n\nThe `host` should not refer to a service running in the cluster; use the `service` field instead. The host might be resolved via external DNS in some apiservers (e.g., `kube-apiserver` cannot resolve in-cluster DNS as that would be a layering violation). `host` may also be an IP address.\n\nPlease note that using `localhost` or `127.0.0.1` as a `host` is risky unless you take great care to run this webhook on all hosts which run an apiserver which might need to make calls to this webhook. Such installs are likely to be non-portable, i.e., not easy to turn up in a new cluster.\n\nThe scheme must be \"https\"; the URL must begin with \"https://\".\n\nA path is optional, and if present may be any string permissible in a URL. You may use the path to pass an arbitrary string to the webhook, for example, a cluster identifier.\n\nAttempting to use a user or basic auth e.g. \"user:password@\" is not allowed. Fragments (\"#...\") and query parameters (\"?...\") are not allowed, either.",
	"service":  "`service` is a reference to the service for this webhook. Either `service` or `url` must be specified.\n\nIf the webhook is running within the cluster, then you should use `service`.\n\nPort 443 will be used if it is open, otherwise it is an error.",
	"caBundle": "`caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. If unspecified, system trust roots on the apiserver are used.",
}

func (WebhookClientConfig) SwaggerDoc() map[string]string {
	return map_WebhookClientConfig
}

var map_WebhookThrottleConfig = map[string]string{
	"":      "WebhookThrottleConfig holds the configuration for throttling events",
	"qps":   "ThrottleQPS maximum number of batches per second default 10 QPS",
	"burst": "ThrottleBurst is the maximum number of events sent at the same moment default 15 QPS",
}

func (WebhookThrottleConfig) SwaggerDoc() map[string]string {
	return map_WebhookThrottleConfig
}

// AUTO-GENERATED FUNCTIONS END HERE
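For orientation, the sketch below is not part of the vendored diff; it simply constructs an AuditSink using the fields these generated docs describe. The object name, collector URL, and the "Metadata"/"ResponseComplete" string values are illustrative assumptions (the typed constants are not shown in this diff).

package main

import (
	"fmt"

	auditv1alpha1 "k8s.io/api/auditregistration/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	qps := int64(10)   // default noted in the throttle docs
	burst := int64(15) // default noted in the throttle docs
	url := "https://audit-collector.example.invalid/events" // hypothetical collector endpoint

	sink := auditv1alpha1.AuditSink{
		ObjectMeta: metav1.ObjectMeta{Name: "example-sink"},
		Spec: auditv1alpha1.AuditSinkSpec{
			Policy: auditv1alpha1.Policy{
				// "Metadata" is one of the documented level options; a string
				// conversion is used because the constant names are assumptions here.
				Level:  auditv1alpha1.Level("Metadata"),
				Stages: []auditv1alpha1.Stage{auditv1alpha1.Stage("ResponseComplete")},
			},
			Webhook: auditv1alpha1.Webhook{
				Throttle: &auditv1alpha1.WebhookThrottleConfig{QPS: &qps, Burst: &burst},
				ClientConfig: auditv1alpha1.WebhookClientConfig{
					URL: &url, // exactly one of url or service must be set
				},
			},
		},
	}

	fmt.Printf("audit sink %q records at level %s\n", sink.Name, sink.Spec.Policy.Level)
}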
224 vendor/k8s.io/api/auditregistration/v1alpha1/zz_generated.deepcopy.go generated vendored Normal file

@@ -0,0 +1,224 @@
// +build !ignore_autogenerated

/*
Copyright The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Code generated by deepcopy-gen. DO NOT EDIT.

package v1alpha1

import (
	runtime "k8s.io/apimachinery/pkg/runtime"
)

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AuditSink) DeepCopyInto(out *AuditSink) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuditSink.
func (in *AuditSink) DeepCopy() *AuditSink {
	if in == nil {
		return nil
	}
	out := new(AuditSink)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *AuditSink) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AuditSinkList) DeepCopyInto(out *AuditSinkList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	out.ListMeta = in.ListMeta
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]AuditSink, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuditSinkList.
func (in *AuditSinkList) DeepCopy() *AuditSinkList {
	if in == nil {
		return nil
	}
	out := new(AuditSinkList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *AuditSinkList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AuditSinkSpec) DeepCopyInto(out *AuditSinkSpec) {
	*out = *in
	in.Policy.DeepCopyInto(&out.Policy)
	in.Webhook.DeepCopyInto(&out.Webhook)
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuditSinkSpec.
func (in *AuditSinkSpec) DeepCopy() *AuditSinkSpec {
	if in == nil {
		return nil
	}
	out := new(AuditSinkSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Policy) DeepCopyInto(out *Policy) {
	*out = *in
	if in.Stages != nil {
		in, out := &in.Stages, &out.Stages
		*out = make([]Stage, len(*in))
		copy(*out, *in)
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Policy.
func (in *Policy) DeepCopy() *Policy {
	if in == nil {
		return nil
	}
	out := new(Policy)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ServiceReference) DeepCopyInto(out *ServiceReference) {
	*out = *in
	if in.Path != nil {
		in, out := &in.Path, &out.Path
		*out = new(string)
		**out = **in
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceReference.
func (in *ServiceReference) DeepCopy() *ServiceReference {
	if in == nil {
		return nil
	}
	out := new(ServiceReference)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Webhook) DeepCopyInto(out *Webhook) {
	*out = *in
	if in.Throttle != nil {
		in, out := &in.Throttle, &out.Throttle
		*out = new(WebhookThrottleConfig)
		(*in).DeepCopyInto(*out)
	}
	in.ClientConfig.DeepCopyInto(&out.ClientConfig)
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Webhook.
func (in *Webhook) DeepCopy() *Webhook {
	if in == nil {
		return nil
	}
	out := new(Webhook)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WebhookClientConfig) DeepCopyInto(out *WebhookClientConfig) {
	*out = *in
	if in.URL != nil {
		in, out := &in.URL, &out.URL
		*out = new(string)
		**out = **in
	}
	if in.Service != nil {
		in, out := &in.Service, &out.Service
		*out = new(ServiceReference)
		(*in).DeepCopyInto(*out)
	}
	if in.CABundle != nil {
		in, out := &in.CABundle, &out.CABundle
		*out = make([]byte, len(*in))
		copy(*out, *in)
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookClientConfig.
func (in *WebhookClientConfig) DeepCopy() *WebhookClientConfig {
	if in == nil {
		return nil
	}
	out := new(WebhookClientConfig)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WebhookThrottleConfig) DeepCopyInto(out *WebhookThrottleConfig) {
	*out = *in
	if in.QPS != nil {
		in, out := &in.QPS, &out.QPS
		*out = new(int64)
		**out = **in
	}
	if in.Burst != nil {
		in, out := &in.Burst, &out.Burst
		*out = new(int64)
		**out = **in
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookThrottleConfig.
func (in *WebhookThrottleConfig) DeepCopy() *WebhookThrottleConfig {
	if in == nil {
		return nil
	}
	out := new(WebhookThrottleConfig)
	in.DeepCopyInto(out)
	return out
}
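The generated DeepCopy methods above clone nested pointers and slices rather than aliasing them. A minimal sketch (not part of the diff) of what that guarantees, using an assumed QPS value:

package main

import (
	"fmt"

	auditv1alpha1 "k8s.io/api/auditregistration/v1alpha1"
)

func main() {
	qps := int64(10) // assumed starting value
	orig := &auditv1alpha1.AuditSink{
		Spec: auditv1alpha1.AuditSinkSpec{
			Webhook: auditv1alpha1.Webhook{
				Throttle: &auditv1alpha1.WebhookThrottleConfig{QPS: &qps},
			},
		},
	}

	// DeepCopy allocates a fresh WebhookThrottleConfig (and QPS pointer),
	// so changing the copy does not touch the original.
	cp := orig.DeepCopy()
	newQPS := int64(50)
	cp.Spec.Webhook.Throttle.QPS = &newQPS

	fmt.Println(*orig.Spec.Webhook.Throttle.QPS, *cp.Spec.Webhook.Throttle.QPS) // 10 50
}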
1 vendor/k8s.io/api/authentication/v1/doc.go generated vendored

@@ -17,4 +17,5 @@ limitations under the License.
// +k8s:deepcopy-gen=package
// +groupName=authentication.k8s.io
// +k8s:openapi-gen=true

package v1 // import "k8s.io/api/authentication/v1"
384 vendor/k8s.io/api/authentication/v1/generated.pb.go generated vendored

@@ -14,9 +14,8 @@ See the License for the specific language governing permissions and
limitations under the License.
*/

// Code generated by protoc-gen-gogo.
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: k8s.io/kubernetes/vendor/k8s.io/api/authentication/v1/generated.proto
// DO NOT EDIT!

/*
Package v1 is a generated protocol buffer package.
@ -356,6 +355,21 @@ func (m *TokenReviewSpec) MarshalTo(dAtA []byte) (int, error) {
|
|||
i++
|
||||
i = encodeVarintGenerated(dAtA, i, uint64(len(m.Token)))
|
||||
i += copy(dAtA[i:], m.Token)
|
||||
if len(m.Audiences) > 0 {
|
||||
for _, s := range m.Audiences {
|
||||
dAtA[i] = 0x12
|
||||
i++
|
||||
l = len(s)
|
||||
for l >= 1<<7 {
|
||||
dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
|
||||
l >>= 7
|
||||
i++
|
||||
}
|
||||
dAtA[i] = uint8(l)
|
||||
i++
|
||||
i += copy(dAtA[i:], s)
|
||||
}
|
||||
}
|
||||
return i, nil
|
||||
}
|
||||
|
||||
|
@ -394,6 +408,21 @@ func (m *TokenReviewStatus) MarshalTo(dAtA []byte) (int, error) {
|
|||
i++
|
||||
i = encodeVarintGenerated(dAtA, i, uint64(len(m.Error)))
|
||||
i += copy(dAtA[i:], m.Error)
|
||||
if len(m.Audiences) > 0 {
|
||||
for _, s := range m.Audiences {
|
||||
dAtA[i] = 0x22
|
||||
i++
|
||||
l = len(s)
|
||||
for l >= 1<<7 {
|
||||
dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
|
||||
l >>= 7
|
||||
i++
|
||||
}
|
||||
dAtA[i] = uint8(l)
|
||||
i++
|
||||
i += copy(dAtA[i:], s)
|
||||
}
|
||||
}
|
||||
return i, nil
|
||||
}
|
||||
|
||||
|
@ -469,24 +498,6 @@ func (m *UserInfo) MarshalTo(dAtA []byte) (int, error) {
|
|||
return i, nil
|
||||
}
|
||||
|
||||
func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int {
|
||||
dAtA[offset] = uint8(v)
|
||||
dAtA[offset+1] = uint8(v >> 8)
|
||||
dAtA[offset+2] = uint8(v >> 16)
|
||||
dAtA[offset+3] = uint8(v >> 24)
|
||||
dAtA[offset+4] = uint8(v >> 32)
|
||||
dAtA[offset+5] = uint8(v >> 40)
|
||||
dAtA[offset+6] = uint8(v >> 48)
|
||||
dAtA[offset+7] = uint8(v >> 56)
|
||||
return offset + 8
|
||||
}
|
||||
func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int {
|
||||
dAtA[offset] = uint8(v)
|
||||
dAtA[offset+1] = uint8(v >> 8)
|
||||
dAtA[offset+2] = uint8(v >> 16)
|
||||
dAtA[offset+3] = uint8(v >> 24)
|
||||
return offset + 4
|
||||
}
|
||||
func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
|
||||
for v >= 1<<7 {
|
||||
dAtA[offset] = uint8(v&0x7f | 0x80)
|
||||
|
@ -580,6 +591,12 @@ func (m *TokenReviewSpec) Size() (n int) {
|
|||
_ = l
|
||||
l = len(m.Token)
|
||||
n += 1 + l + sovGenerated(uint64(l))
|
||||
if len(m.Audiences) > 0 {
|
||||
for _, s := range m.Audiences {
|
||||
l = len(s)
|
||||
n += 1 + l + sovGenerated(uint64(l))
|
||||
}
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
|
@ -591,6 +608,12 @@ func (m *TokenReviewStatus) Size() (n int) {
|
|||
n += 1 + l + sovGenerated(uint64(l))
|
||||
l = len(m.Error)
|
||||
n += 1 + l + sovGenerated(uint64(l))
|
||||
if len(m.Audiences) > 0 {
|
||||
for _, s := range m.Audiences {
|
||||
l = len(s)
|
||||
n += 1 + l + sovGenerated(uint64(l))
|
||||
}
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
|
@ -698,6 +721,7 @@ func (this *TokenReviewSpec) String() string {
|
|||
}
|
||||
s := strings.Join([]string{`&TokenReviewSpec{`,
|
||||
`Token:` + fmt.Sprintf("%v", this.Token) + `,`,
|
||||
`Audiences:` + fmt.Sprintf("%v", this.Audiences) + `,`,
|
||||
`}`,
|
||||
}, "")
|
||||
return s
|
||||
|
@ -710,6 +734,7 @@ func (this *TokenReviewStatus) String() string {
|
|||
`Authenticated:` + fmt.Sprintf("%v", this.Authenticated) + `,`,
|
||||
`User:` + strings.Replace(strings.Replace(this.User.String(), "UserInfo", "UserInfo", 1), `&`, ``, 1) + `,`,
|
||||
`Error:` + fmt.Sprintf("%v", this.Error) + `,`,
|
||||
`Audiences:` + fmt.Sprintf("%v", this.Audiences) + `,`,
|
||||
`}`,
|
||||
}, "")
|
||||
return s
|
||||
|
@ -1569,6 +1594,35 @@ func (m *TokenReviewSpec) Unmarshal(dAtA []byte) error {
|
|||
}
|
||||
m.Token = string(dAtA[iNdEx:postIndex])
|
||||
iNdEx = postIndex
|
||||
case 2:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Audiences", wireType)
|
||||
}
|
||||
var stringLen uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowGenerated
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
intStringLen := int(stringLen)
|
||||
if intStringLen < 0 {
|
||||
return ErrInvalidLengthGenerated
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.Audiences = append(m.Audiences, string(dAtA[iNdEx:postIndex]))
|
||||
iNdEx = postIndex
|
||||
default:
|
||||
iNdEx = preIndex
|
||||
skippy, err := skipGenerated(dAtA[iNdEx:])
|
||||
|
@ -1698,6 +1752,35 @@ func (m *TokenReviewStatus) Unmarshal(dAtA []byte) error {
|
|||
}
|
||||
m.Error = string(dAtA[iNdEx:postIndex])
|
||||
iNdEx = postIndex
|
||||
case 4:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Audiences", wireType)
|
||||
}
|
||||
var stringLen uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowGenerated
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
intStringLen := int(stringLen)
|
||||
if intStringLen < 0 {
|
||||
return ErrInvalidLengthGenerated
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.Audiences = append(m.Audiences, string(dAtA[iNdEx:postIndex]))
|
||||
iNdEx = postIndex
|
||||
default:
|
||||
iNdEx = preIndex
|
||||
skippy, err := skipGenerated(dAtA[iNdEx:])
|
||||
|
@ -1861,51 +1944,14 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error {
|
|||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
var keykey uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowGenerated
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
keykey |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
var stringLenmapkey uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowGenerated
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLenmapkey |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
intStringLenmapkey := int(stringLenmapkey)
|
||||
if intStringLenmapkey < 0 {
|
||||
return ErrInvalidLengthGenerated
|
||||
}
|
||||
postStringIndexmapkey := iNdEx + intStringLenmapkey
|
||||
if postStringIndexmapkey > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
mapkey := string(dAtA[iNdEx:postStringIndexmapkey])
|
||||
iNdEx = postStringIndexmapkey
|
||||
if m.Extra == nil {
|
||||
m.Extra = make(map[string]ExtraValue)
|
||||
}
|
||||
if iNdEx < postIndex {
|
||||
var valuekey uint64
|
||||
var mapkey string
|
||||
mapvalue := &ExtraValue{}
|
||||
for iNdEx < postIndex {
|
||||
entryPreIndex := iNdEx
|
||||
var wire uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowGenerated
|
||||
|
@ -1915,46 +1961,85 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error {
|
|||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
valuekey |= (uint64(b) & 0x7F) << shift
|
||||
wire |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
var mapmsglen int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowGenerated
|
||||
fieldNum := int32(wire >> 3)
|
||||
if fieldNum == 1 {
|
||||
var stringLenmapkey uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowGenerated
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLenmapkey |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if iNdEx >= l {
|
||||
intStringLenmapkey := int(stringLenmapkey)
|
||||
if intStringLenmapkey < 0 {
|
||||
return ErrInvalidLengthGenerated
|
||||
}
|
||||
postStringIndexmapkey := iNdEx + intStringLenmapkey
|
||||
if postStringIndexmapkey > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
mapmsglen |= (int(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
|
||||
iNdEx = postStringIndexmapkey
|
||||
} else if fieldNum == 2 {
|
||||
var mapmsglen int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowGenerated
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
mapmsglen |= (int(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if mapmsglen < 0 {
|
||||
return ErrInvalidLengthGenerated
|
||||
}
|
||||
postmsgIndex := iNdEx + mapmsglen
|
||||
if mapmsglen < 0 {
|
||||
return ErrInvalidLengthGenerated
|
||||
}
|
||||
if postmsgIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
mapvalue = &ExtraValue{}
|
||||
if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
|
||||
return err
|
||||
}
|
||||
iNdEx = postmsgIndex
|
||||
} else {
|
||||
iNdEx = entryPreIndex
|
||||
skippy, err := skipGenerated(dAtA[iNdEx:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if skippy < 0 {
|
||||
return ErrInvalidLengthGenerated
|
||||
}
|
||||
if (iNdEx + skippy) > postIndex {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
iNdEx += skippy
|
||||
}
|
||||
if mapmsglen < 0 {
|
||||
return ErrInvalidLengthGenerated
|
||||
}
|
||||
postmsgIndex := iNdEx + mapmsglen
|
||||
if mapmsglen < 0 {
|
||||
return ErrInvalidLengthGenerated
|
||||
}
|
||||
if postmsgIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
mapvalue := &ExtraValue{}
|
||||
if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
|
||||
return err
|
||||
}
|
||||
iNdEx = postmsgIndex
|
||||
m.Extra[mapkey] = *mapvalue
|
||||
} else {
|
||||
var mapvalue ExtraValue
|
||||
m.Extra[mapkey] = mapvalue
|
||||
}
|
||||
m.Extra[mapkey] = *mapvalue
|
||||
iNdEx = postIndex
|
||||
default:
|
||||
iNdEx = preIndex
|
||||
|
@ -2087,61 +2172,62 @@ func init() {
|
|||
}
|
||||
|
||||
var fileDescriptorGenerated = []byte{
|
||||
// 892 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x55, 0xcf, 0x8f, 0xdb, 0x44,
|
||||
0x14, 0x8e, 0xf3, 0x63, 0xb5, 0x99, 0x74, 0x97, 0xdd, 0x29, 0x95, 0xa2, 0x05, 0xec, 0x60, 0x24,
|
||||
0x14, 0x01, 0xb5, 0x9b, 0x08, 0x95, 0xaa, 0x48, 0x48, 0x6b, 0x36, 0x82, 0x08, 0x41, 0xab, 0x69,
|
||||
0x77, 0x41, 0x9c, 0x98, 0xd8, 0x6f, 0xb3, 0x26, 0x78, 0x6c, 0xec, 0x71, 0x68, 0x6e, 0xfd, 0x13,
|
||||
0x38, 0x82, 0xc4, 0x81, 0x3f, 0x02, 0x89, 0x23, 0xd7, 0x3d, 0x56, 0x9c, 0x7a, 0x40, 0x11, 0x6b,
|
||||
0xfe, 0x05, 0x4e, 0x9c, 0xd0, 0x8c, 0x67, 0xe3, 0xfc, 0xd8, 0x4d, 0x73, 0xea, 0x2d, 0xf3, 0xde,
|
||||
0xf7, 0xbe, 0x79, 0xef, 0x9b, 0x2f, 0xcf, 0xa8, 0x37, 0xba, 0x97, 0x58, 0x7e, 0x68, 0x8f, 0xd2,
|
||||
0x01, 0xc4, 0x0c, 0x38, 0x24, 0xf6, 0x18, 0x98, 0x17, 0xc6, 0xb6, 0x4a, 0xd0, 0xc8, 0xb7, 0x69,
|
||||
0xca, 0xcf, 0x80, 0x71, 0xdf, 0xa5, 0xdc, 0x0f, 0x99, 0x3d, 0xee, 0xd8, 0x43, 0x60, 0x10, 0x53,
|
||||
0x0e, 0x9e, 0x15, 0xc5, 0x21, 0x0f, 0xf1, 0xeb, 0x39, 0xda, 0xa2, 0x91, 0x6f, 0x2d, 0xa2, 0xad,
|
||||
0x71, 0xe7, 0xe0, 0xf6, 0xd0, 0xe7, 0x67, 0xe9, 0xc0, 0x72, 0xc3, 0xc0, 0x1e, 0x86, 0xc3, 0xd0,
|
||||
0x96, 0x45, 0x83, 0xf4, 0x54, 0x9e, 0xe4, 0x41, 0xfe, 0xca, 0xc9, 0x0e, 0xde, 0x2f, 0xae, 0x0e,
|
||||
0xa8, 0x7b, 0xe6, 0x33, 0x88, 0x27, 0x76, 0x34, 0x1a, 0x8a, 0x40, 0x62, 0x07, 0xc0, 0xe9, 0x15,
|
||||
0x2d, 0x1c, 0xd8, 0xd7, 0x55, 0xc5, 0x29, 0xe3, 0x7e, 0x00, 0x2b, 0x05, 0x77, 0x5f, 0x54, 0x90,
|
||||
0xb8, 0x67, 0x10, 0xd0, 0xe5, 0x3a, 0xf3, 0x4f, 0x0d, 0xbd, 0xea, 0x84, 0x29, 0xf3, 0x1e, 0x0c,
|
||||
0xbe, 0x05, 0x97, 0x13, 0x38, 0x85, 0x18, 0x98, 0x0b, 0xb8, 0x85, 0xaa, 0x23, 0x9f, 0x79, 0x4d,
|
||||
0xad, 0xa5, 0xb5, 0xeb, 0xce, 0x8d, 0xf3, 0xa9, 0x51, 0xca, 0xa6, 0x46, 0xf5, 0x33, 0x9f, 0x79,
|
||||
0x44, 0x66, 0x70, 0x17, 0x21, 0xfa, 0xb0, 0x7f, 0x02, 0x71, 0xe2, 0x87, 0xac, 0x59, 0x96, 0x38,
|
||||
0xac, 0x70, 0xe8, 0x70, 0x96, 0x21, 0x73, 0x28, 0xc1, 0xca, 0x68, 0x00, 0xcd, 0xca, 0x22, 0xeb,
|
||||
0x17, 0x34, 0x00, 0x22, 0x33, 0xd8, 0x41, 0x95, 0xb4, 0x7f, 0xd4, 0xac, 0x4a, 0xc0, 0x1d, 0x05,
|
||||
0xa8, 0x1c, 0xf7, 0x8f, 0xfe, 0x9b, 0x1a, 0x6f, 0x5e, 0x37, 0x24, 0x9f, 0x44, 0x90, 0x58, 0xc7,
|
||||
0xfd, 0x23, 0x22, 0x8a, 0xcd, 0x0f, 0x10, 0xea, 0x3d, 0xe1, 0x31, 0x3d, 0xa1, 0xdf, 0xa5, 0x80,
|
||||
0x0d, 0x54, 0xf3, 0x39, 0x04, 0x49, 0x53, 0x6b, 0x55, 0xda, 0x75, 0xa7, 0x9e, 0x4d, 0x8d, 0x5a,
|
||||
0x5f, 0x04, 0x48, 0x1e, 0xbf, 0xbf, 0xfd, 0xd3, 0xaf, 0x46, 0xe9, 0xe9, 0x5f, 0xad, 0x92, 0xf9,
|
||||
0x4b, 0x19, 0xdd, 0x78, 0x1c, 0x8e, 0x80, 0x11, 0xf8, 0x3e, 0x85, 0x84, 0xe3, 0x6f, 0xd0, 0xb6,
|
||||
0x78, 0x22, 0x8f, 0x72, 0x2a, 0x95, 0x68, 0x74, 0xef, 0x58, 0x85, 0x3b, 0x66, 0x4d, 0x58, 0xd1,
|
||||
0x68, 0x28, 0x02, 0x89, 0x25, 0xd0, 0xd6, 0xb8, 0x63, 0xe5, 0x72, 0x7e, 0x0e, 0x9c, 0x16, 0x9a,
|
||||
0x14, 0x31, 0x32, 0x63, 0xc5, 0x0f, 0x51, 0x35, 0x89, 0xc0, 0x95, 0xfa, 0x35, 0xba, 0x96, 0xb5,
|
||||
0xce, 0x7b, 0xd6, 0x7c, 0x6f, 0x8f, 0x22, 0x70, 0x0b, 0x05, 0xc5, 0x89, 0x48, 0x26, 0xfc, 0x15,
|
||||
0xda, 0x4a, 0x38, 0xe5, 0x69, 0x22, 0x55, 0x5e, 0xec, 0xf8, 0x45, 0x9c, 0xb2, 0xce, 0xd9, 0x55,
|
||||
0xac, 0x5b, 0xf9, 0x99, 0x28, 0x3e, 0xf3, 0x5f, 0x0d, 0xed, 0x2d, 0xb7, 0x80, 0xdf, 0x45, 0x75,
|
||||
0x9a, 0x7a, 0xbe, 0x30, 0xcd, 0xa5, 0xc4, 0x3b, 0xd9, 0xd4, 0xa8, 0x1f, 0x5e, 0x06, 0x49, 0x91,
|
||||
0xc7, 0x0c, 0xed, 0x0e, 0x16, 0xdc, 0xa6, 0x7a, 0xec, 0xae, 0xef, 0xf1, 0x2a, 0x87, 0x3a, 0x38,
|
||||
0x9b, 0x1a, 0xbb, 0x8b, 0x19, 0xb2, 0xc4, 0x8e, 0x3f, 0x46, 0xfb, 0xf0, 0x24, 0xf2, 0x63, 0xc9,
|
||||
0xf4, 0x08, 0xdc, 0x90, 0x79, 0x89, 0xf4, 0x56, 0xc5, 0xb9, 0x95, 0x4d, 0x8d, 0xfd, 0xde, 0x72,
|
||||
0x92, 0xac, 0xe2, 0xcd, 0xdf, 0x34, 0x84, 0x57, 0x55, 0xc2, 0x6f, 0xa1, 0x1a, 0x17, 0x51, 0xf5,
|
||||
0x17, 0xd9, 0x51, 0xa2, 0xd5, 0x72, 0x68, 0x9e, 0xc3, 0x13, 0x74, 0xb3, 0x20, 0x7c, 0xec, 0x07,
|
||||
0x90, 0x70, 0x1a, 0x44, 0xea, 0xb5, 0xdf, 0xd9, 0xcc, 0x4b, 0xa2, 0xcc, 0x79, 0x4d, 0xd1, 0xdf,
|
||||
0xec, 0xad, 0xd2, 0x91, 0xab, 0xee, 0x30, 0x7f, 0x2e, 0xa3, 0x86, 0x6a, 0x7b, 0xec, 0xc3, 0x0f,
|
||||
0x2f, 0xc1, 0xcb, 0x0f, 0x16, 0xbc, 0x7c, 0x7b, 0x23, 0xdf, 0x89, 0xd6, 0xae, 0xb5, 0xf2, 0x97,
|
||||
0x4b, 0x56, 0xb6, 0x37, 0xa7, 0x5c, 0xef, 0xe4, 0xbb, 0xe8, 0x95, 0xa5, 0xfb, 0x37, 0x7a, 0x4e,
|
||||
0xf3, 0x0f, 0x0d, 0xed, 0xaf, 0xdc, 0x82, 0x3f, 0x44, 0x3b, 0x73, 0xcd, 0x40, 0xbe, 0x34, 0xb7,
|
||||
0x9d, 0x5b, 0x8a, 0x62, 0xe7, 0x70, 0x3e, 0x49, 0x16, 0xb1, 0xf8, 0x53, 0x54, 0x4d, 0x13, 0x88,
|
||||
0x95, 0x68, 0x6f, 0xaf, 0x9f, 0xf0, 0x38, 0x81, 0xb8, 0xcf, 0x4e, 0xc3, 0x42, 0x2d, 0x11, 0x21,
|
||||
0x92, 0x41, 0x4c, 0x00, 0x71, 0x1c, 0xc6, 0x6a, 0xbb, 0xce, 0x26, 0xe8, 0x89, 0x20, 0xc9, 0x73,
|
||||
0xe6, 0xef, 0x65, 0xb4, 0x7d, 0xc9, 0x82, 0xdf, 0x43, 0xdb, 0xa2, 0x52, 0xae, 0xe4, 0x7c, 0xec,
|
||||
0x3d, 0x55, 0x24, 0x31, 0x22, 0x4e, 0x66, 0x08, 0xfc, 0x06, 0xaa, 0xa4, 0xbe, 0xa7, 0x36, 0x7d,
|
||||
0x63, 0x6e, 0x35, 0x13, 0x11, 0xc7, 0x26, 0xda, 0x1a, 0xc6, 0x61, 0x1a, 0x89, 0xc7, 0x12, 0x5b,
|
||||
0x00, 0x09, 0xdd, 0x3f, 0x91, 0x11, 0xa2, 0x32, 0xf8, 0x04, 0xd5, 0x40, 0x6c, 0xe6, 0x66, 0xb5,
|
||||
0x55, 0x69, 0x37, 0xba, 0x9d, 0xcd, 0xa6, 0xb5, 0xe4, 0x36, 0xef, 0x31, 0x1e, 0x4f, 0xe6, 0xa6,
|
||||
0x12, 0x31, 0x92, 0xd3, 0x1d, 0x0c, 0xd4, 0xc6, 0x97, 0x18, 0xbc, 0x87, 0x2a, 0x23, 0x98, 0xe4,
|
||||
0x13, 0x11, 0xf1, 0x13, 0x7f, 0x84, 0x6a, 0x63, 0xf1, 0x31, 0x50, 0x2a, 0xb7, 0xd7, 0xdf, 0x5b,
|
||||
0x7c, 0x3c, 0x48, 0x5e, 0x76, 0xbf, 0x7c, 0x4f, 0x73, 0xda, 0xe7, 0x17, 0x7a, 0xe9, 0xd9, 0x85,
|
||||
0x5e, 0x7a, 0x7e, 0xa1, 0x97, 0x9e, 0x66, 0xba, 0x76, 0x9e, 0xe9, 0xda, 0xb3, 0x4c, 0xd7, 0x9e,
|
||||
0x67, 0xba, 0xf6, 0x77, 0xa6, 0x6b, 0x3f, 0xfe, 0xa3, 0x97, 0xbe, 0x2e, 0x8f, 0x3b, 0xff, 0x07,
|
||||
0x00, 0x00, 0xff, 0xff, 0x5e, 0x8d, 0x94, 0x78, 0x88, 0x08, 0x00, 0x00,
|
||||
// 900 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x55, 0xcf, 0x6f, 0xe3, 0x44,
|
||||
0x14, 0x8e, 0xf3, 0xa3, 0x4a, 0x26, 0xdb, 0xd2, 0xce, 0xb2, 0x52, 0x54, 0xc0, 0x2e, 0x41, 0x42,
|
||||
0x15, 0xb0, 0xf6, 0x26, 0x42, 0xb0, 0x5a, 0x24, 0xa4, 0x9a, 0x46, 0x10, 0x21, 0xd8, 0xd5, 0xec,
|
||||
0xb6, 0x20, 0x4e, 0x4c, 0xec, 0xd7, 0xc4, 0x04, 0x8f, 0x8d, 0x3d, 0x0e, 0x9b, 0xdb, 0xfe, 0x09,
|
||||
0x1c, 0x41, 0xe2, 0xc0, 0x1f, 0x81, 0xc4, 0xbf, 0xd0, 0xe3, 0x8a, 0xd3, 0x1e, 0x50, 0x44, 0xcd,
|
||||
0x95, 0x23, 0x27, 0x4e, 0x68, 0xc6, 0xd3, 0x38, 0x4e, 0xda, 0x34, 0x27, 0x6e, 0x9e, 0xf7, 0xbe,
|
||||
0xf7, 0xbd, 0x37, 0xdf, 0x7c, 0x9e, 0x41, 0xbd, 0xf1, 0xfd, 0xd8, 0xf4, 0x02, 0x6b, 0x9c, 0x0c,
|
||||
0x20, 0x62, 0xc0, 0x21, 0xb6, 0x26, 0xc0, 0xdc, 0x20, 0xb2, 0x54, 0x82, 0x86, 0x9e, 0x45, 0x13,
|
||||
0x3e, 0x02, 0xc6, 0x3d, 0x87, 0x72, 0x2f, 0x60, 0xd6, 0xa4, 0x63, 0x0d, 0x81, 0x41, 0x44, 0x39,
|
||||
0xb8, 0x66, 0x18, 0x05, 0x3c, 0xc0, 0xaf, 0x66, 0x68, 0x93, 0x86, 0x9e, 0x59, 0x44, 0x9b, 0x93,
|
||||
0xce, 0xfe, 0xdd, 0xa1, 0xc7, 0x47, 0xc9, 0xc0, 0x74, 0x02, 0xdf, 0x1a, 0x06, 0xc3, 0xc0, 0x92,
|
||||
0x45, 0x83, 0xe4, 0x4c, 0xae, 0xe4, 0x42, 0x7e, 0x65, 0x64, 0xfb, 0xef, 0xe6, 0xad, 0x7d, 0xea,
|
||||
0x8c, 0x3c, 0x06, 0xd1, 0xd4, 0x0a, 0xc7, 0x43, 0x11, 0x88, 0x2d, 0x1f, 0x38, 0xbd, 0x62, 0x84,
|
||||
0x7d, 0xeb, 0xba, 0xaa, 0x28, 0x61, 0xdc, 0xf3, 0x61, 0xa5, 0xe0, 0xbd, 0x9b, 0x0a, 0x62, 0x67,
|
||||
0x04, 0x3e, 0x5d, 0xae, 0x6b, 0xff, 0xae, 0xa1, 0x97, 0xed, 0x20, 0x61, 0xee, 0xc3, 0xc1, 0x37,
|
||||
0xe0, 0x70, 0x02, 0x67, 0x10, 0x01, 0x73, 0x00, 0x1f, 0xa0, 0xea, 0xd8, 0x63, 0x6e, 0x4b, 0x3b,
|
||||
0xd0, 0x0e, 0x1b, 0xf6, 0xad, 0xf3, 0x99, 0x51, 0x4a, 0x67, 0x46, 0xf5, 0x53, 0x8f, 0xb9, 0x44,
|
||||
0x66, 0x70, 0x17, 0x21, 0xfa, 0xa8, 0x7f, 0x0a, 0x51, 0xec, 0x05, 0xac, 0x55, 0x96, 0x38, 0xac,
|
||||
0x70, 0xe8, 0x68, 0x9e, 0x21, 0x0b, 0x28, 0xc1, 0xca, 0xa8, 0x0f, 0xad, 0x4a, 0x91, 0xf5, 0x73,
|
||||
0xea, 0x03, 0x91, 0x19, 0x6c, 0xa3, 0x4a, 0xd2, 0x3f, 0x6e, 0x55, 0x25, 0xe0, 0x9e, 0x02, 0x54,
|
||||
0x4e, 0xfa, 0xc7, 0xff, 0xce, 0x8c, 0xd7, 0xaf, 0xdb, 0x24, 0x9f, 0x86, 0x10, 0x9b, 0x27, 0xfd,
|
||||
0x63, 0x22, 0x8a, 0xdb, 0xef, 0x23, 0xd4, 0x7b, 0xca, 0x23, 0x7a, 0x4a, 0xbf, 0x4d, 0x00, 0x1b,
|
||||
0xa8, 0xe6, 0x71, 0xf0, 0xe3, 0x96, 0x76, 0x50, 0x39, 0x6c, 0xd8, 0x8d, 0x74, 0x66, 0xd4, 0xfa,
|
||||
0x22, 0x40, 0xb2, 0xf8, 0x83, 0xfa, 0x8f, 0xbf, 0x18, 0xa5, 0x67, 0x7f, 0x1c, 0x94, 0xda, 0x3f,
|
||||
0x97, 0xd1, 0xad, 0x27, 0xc1, 0x18, 0x18, 0x81, 0xef, 0x12, 0x88, 0x39, 0xfe, 0x1a, 0xd5, 0xc5,
|
||||
0x11, 0xb9, 0x94, 0x53, 0xa9, 0x44, 0xb3, 0x7b, 0xcf, 0xcc, 0xdd, 0x31, 0x1f, 0xc2, 0x0c, 0xc7,
|
||||
0x43, 0x11, 0x88, 0x4d, 0x81, 0x36, 0x27, 0x1d, 0x33, 0x93, 0xf3, 0x33, 0xe0, 0x34, 0xd7, 0x24,
|
||||
0x8f, 0x91, 0x39, 0x2b, 0x7e, 0x84, 0xaa, 0x71, 0x08, 0x8e, 0xd4, 0xaf, 0xd9, 0x35, 0xcd, 0x75,
|
||||
0xde, 0x33, 0x17, 0x67, 0x7b, 0x1c, 0x82, 0x93, 0x2b, 0x28, 0x56, 0x44, 0x32, 0xe1, 0x2f, 0xd1,
|
||||
0x56, 0xcc, 0x29, 0x4f, 0x62, 0xa9, 0x72, 0x71, 0xe2, 0x9b, 0x38, 0x65, 0x9d, 0xbd, 0xa3, 0x58,
|
||||
0xb7, 0xb2, 0x35, 0x51, 0x7c, 0xed, 0x7f, 0x34, 0xb4, 0xbb, 0x3c, 0x02, 0x7e, 0x1b, 0x35, 0x68,
|
||||
0xe2, 0x7a, 0xc2, 0x34, 0x97, 0x12, 0x6f, 0xa7, 0x33, 0xa3, 0x71, 0x74, 0x19, 0x24, 0x79, 0x1e,
|
||||
0x33, 0xb4, 0x33, 0x28, 0xb8, 0x4d, 0xcd, 0xd8, 0x5d, 0x3f, 0xe3, 0x55, 0x0e, 0xb5, 0x71, 0x3a,
|
||||
0x33, 0x76, 0x8a, 0x19, 0xb2, 0xc4, 0x8e, 0x3f, 0x42, 0x7b, 0xf0, 0x34, 0xf4, 0x22, 0xc9, 0xf4,
|
||||
0x18, 0x9c, 0x80, 0xb9, 0xb1, 0xf4, 0x56, 0xc5, 0xbe, 0x93, 0xce, 0x8c, 0xbd, 0xde, 0x72, 0x92,
|
||||
0xac, 0xe2, 0xdb, 0xbf, 0x6a, 0x08, 0xaf, 0xaa, 0x84, 0xdf, 0x40, 0x35, 0x2e, 0xa2, 0xea, 0x17,
|
||||
0xd9, 0x56, 0xa2, 0xd5, 0x32, 0x68, 0x96, 0xc3, 0x53, 0x74, 0x3b, 0x27, 0x7c, 0xe2, 0xf9, 0x10,
|
||||
0x73, 0xea, 0x87, 0xea, 0xb4, 0xdf, 0xda, 0xcc, 0x4b, 0xa2, 0xcc, 0x7e, 0x45, 0xd1, 0xdf, 0xee,
|
||||
0xad, 0xd2, 0x91, 0xab, 0x7a, 0xb4, 0x7f, 0x2a, 0xa3, 0xa6, 0x1a, 0x7b, 0xe2, 0xc1, 0xf7, 0xff,
|
||||
0x83, 0x97, 0x1f, 0x16, 0xbc, 0x7c, 0x77, 0x23, 0xdf, 0x89, 0xd1, 0xae, 0xb5, 0xf2, 0x17, 0x4b,
|
||||
0x56, 0xb6, 0x36, 0xa7, 0x5c, 0xef, 0x64, 0x07, 0xbd, 0xb4, 0xd4, 0x7f, 0xb3, 0xe3, 0x2c, 0x98,
|
||||
0xbd, 0xbc, 0xde, 0xec, 0xed, 0xbf, 0x35, 0xb4, 0xb7, 0x32, 0x12, 0xfe, 0x00, 0x6d, 0x2f, 0x4c,
|
||||
0x0e, 0xd9, 0x0d, 0x5b, 0xb7, 0xef, 0xa8, 0x7e, 0xdb, 0x47, 0x8b, 0x49, 0x52, 0xc4, 0xe2, 0x4f,
|
||||
0x50, 0x35, 0x89, 0x21, 0x52, 0x0a, 0xbf, 0xb9, 0x5e, 0x8e, 0x93, 0x18, 0xa2, 0x3e, 0x3b, 0x0b,
|
||||
0x72, 0x69, 0x45, 0x84, 0x48, 0x06, 0xb1, 0x5d, 0x88, 0xa2, 0x20, 0x52, 0x57, 0xf1, 0x7c, 0xbb,
|
||||
0x3d, 0x11, 0x24, 0x59, 0xae, 0xb8, 0xdd, 0xea, 0x0d, 0xdb, 0xfd, 0xad, 0x8c, 0xea, 0x97, 0x2d,
|
||||
0xf1, 0x3b, 0xa8, 0x2e, 0xda, 0xc8, 0xcb, 0x3e, 0x13, 0x74, 0x57, 0x75, 0x90, 0x18, 0x11, 0x27,
|
||||
0x73, 0x04, 0x7e, 0x0d, 0x55, 0x12, 0xcf, 0x55, 0x6f, 0x48, 0x73, 0xe1, 0xd2, 0x27, 0x22, 0x8e,
|
||||
0xdb, 0x68, 0x6b, 0x18, 0x05, 0x49, 0x28, 0x6c, 0x20, 0x66, 0x40, 0xe2, 0x44, 0x3f, 0x96, 0x11,
|
||||
0xa2, 0x32, 0xf8, 0x14, 0xd5, 0x40, 0xdc, 0xf9, 0x72, 0xcc, 0x66, 0xb7, 0xb3, 0x99, 0x34, 0xa6,
|
||||
0x7c, 0x27, 0x7a, 0x8c, 0x47, 0xd3, 0x05, 0x09, 0x44, 0x8c, 0x64, 0x74, 0xfb, 0x03, 0xf5, 0x96,
|
||||
0x48, 0x0c, 0xde, 0x45, 0x95, 0x31, 0x4c, 0xb3, 0x1d, 0x11, 0xf1, 0x89, 0x3f, 0x44, 0xb5, 0x89,
|
||||
0x78, 0x66, 0xd4, 0x91, 0x1c, 0xae, 0xef, 0x9b, 0x3f, 0x4b, 0x24, 0x2b, 0x7b, 0x50, 0xbe, 0xaf,
|
||||
0xd9, 0x87, 0xe7, 0x17, 0x7a, 0xe9, 0xf9, 0x85, 0x5e, 0x7a, 0x71, 0xa1, 0x97, 0x9e, 0xa5, 0xba,
|
||||
0x76, 0x9e, 0xea, 0xda, 0xf3, 0x54, 0xd7, 0x5e, 0xa4, 0xba, 0xf6, 0x67, 0xaa, 0x6b, 0x3f, 0xfc,
|
||||
0xa5, 0x97, 0xbe, 0x2a, 0x4f, 0x3a, 0xff, 0x05, 0x00, 0x00, 0xff, 0xff, 0x5f, 0x04, 0x81, 0x6f,
|
||||
0xe2, 0x08, 0x00, 0x00,
|
||||
}
|
||||
|
|
20 vendor/k8s.io/api/authentication/v1/generated.proto generated vendored

@@ -118,6 +118,14 @@ message TokenReviewSpec {
  // Token is the opaque bearer token.
  // +optional
  optional string token = 1;

  // Audiences is a list of the identifiers that the resource server presented
  // with the token identifies as. Audience-aware token authenticators will
  // verify that the token was intended for at least one of the audiences in
  // this list. If no audiences are provided, the audience will default to the
  // audience of the Kubernetes apiserver.
  // +optional
  repeated string audiences = 2;
}

// TokenReviewStatus is the result of the token authentication request.

@@ -130,6 +138,18 @@ message TokenReviewStatus {
  // +optional
  optional UserInfo user = 2;

  // Audiences are audience identifiers chosen by the authenticator that are
  // compatible with both the TokenReview and token. An identifier is any
  // identifier in the intersection of the TokenReviewSpec audiences and the
  // token's audiences. A client of the TokenReview API that sets the
  // spec.audiences field should validate that a compatible audience identifier
  // is returned in the status.audiences field to ensure that the TokenReview
  // server is audience aware. If a TokenReview returns an empty
  // status.audience field where status.authenticated is "true", the token is
  // valid against the audience of the Kubernetes API server.
  // +optional
  repeated string audiences = 4;

  // Error indicates that the token couldn't be checked
  // +optional
  optional string error = 3;
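The audiences contract described in these comments is enforced on the client side: a caller that sets spec.audiences should check that status.audiences overlaps it. A rough Go sketch of that check (not part of the diff); the token and audience strings are placeholders, and in practice the status is filled in by the API server:

package main

import (
	"fmt"

	authv1 "k8s.io/api/authentication/v1"
)

func main() {
	review := authv1.TokenReview{
		Spec: authv1.TokenReviewSpec{
			Token:     "opaque-bearer-token",                   // placeholder value
			Audiences: []string{"https://svc.example.invalid"}, // placeholder audience
		},
	}

	// Pretend the authenticator answered; normally this comes back from the
	// API server when the TokenReview is submitted.
	review.Status = authv1.TokenReviewStatus{
		Authenticated: true,
		Audiences:     []string{"https://svc.example.invalid"},
	}

	// An audience-aware caller verifies the intersection is non-empty.
	match := false
	for _, got := range review.Status.Audiences {
		for _, want := range review.Spec.Audiences {
			if got == want {
				match = true
			}
		}
	}
	fmt.Println("audience match:", match)
}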
18 vendor/k8s.io/api/authentication/v1/types.go generated vendored

@@ -64,6 +64,13 @@ type TokenReviewSpec struct {
	// Token is the opaque bearer token.
	// +optional
	Token string `json:"token,omitempty" protobuf:"bytes,1,opt,name=token"`
	// Audiences is a list of the identifiers that the resource server presented
	// with the token identifies as. Audience-aware token authenticators will
	// verify that the token was intended for at least one of the audiences in
	// this list. If no audiences are provided, the audience will default to the
	// audience of the Kubernetes apiserver.
	// +optional
	Audiences []string `json:"audiences,omitempty" protobuf:"bytes,2,rep,name=audiences"`
}

// TokenReviewStatus is the result of the token authentication request.

@@ -74,6 +81,17 @@ type TokenReviewStatus struct {
	// User is the UserInfo associated with the provided token.
	// +optional
	User UserInfo `json:"user,omitempty" protobuf:"bytes,2,opt,name=user"`
	// Audiences are audience identifiers chosen by the authenticator that are
	// compatible with both the TokenReview and token. An identifier is any
	// identifier in the intersection of the TokenReviewSpec audiences and the
	// token's audiences. A client of the TokenReview API that sets the
	// spec.audiences field should validate that a compatible audience identifier
	// is returned in the status.audiences field to ensure that the TokenReview
	// server is audience aware. If a TokenReview returns an empty
	// status.audience field where status.authenticated is "true", the token is
	// valid against the audience of the Kubernetes API server.
	// +optional
	Audiences []string `json:"audiences,omitempty" protobuf:"bytes,4,rep,name=audiences"`
	// Error indicates that the token couldn't be checked
	// +optional
	Error string `json:"error,omitempty" protobuf:"bytes,3,opt,name=error"`
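As a small illustration of the struct tags above (not part of the diff), marshalling a TokenReviewSpec shows the new audiences field on the wire only when it is set; the token and audience values here are placeholders:

package main

import (
	"encoding/json"
	"fmt"

	authv1 "k8s.io/api/authentication/v1"
)

func main() {
	spec := authv1.TokenReviewSpec{
		Token:     "opaque-bearer-token", // placeholder
		Audiences: []string{"https://svc.example.invalid"},
	}

	// The `json:"audiences,omitempty"` tag means the field is emitted only
	// when at least one audience is present.
	b, err := json.Marshal(spec)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
}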
6 vendor/k8s.io/api/authentication/v1/types_swagger_doc_generated.go generated vendored

@@ -79,8 +79,9 @@ func (TokenReview) SwaggerDoc() map[string]string {
}

var map_TokenReviewSpec = map[string]string{
	"":          "TokenReviewSpec is a description of the token authentication request.",
	"token":     "Token is the opaque bearer token.",
	"audiences": "Audiences is a list of the identifiers that the resource server presented with the token identifies as. Audience-aware token authenticators will verify that the token was intended for at least one of the audiences in this list. If no audiences are provided, the audience will default to the audience of the Kubernetes apiserver.",
}

func (TokenReviewSpec) SwaggerDoc() map[string]string {

@@ -91,6 +92,7 @@ var map_TokenReviewStatus = map[string]string{
	"":              "TokenReviewStatus is the result of the token authentication request.",
	"authenticated": "Authenticated indicates that the token was associated with a known user.",
	"user":          "User is the UserInfo associated with the provided token.",
	"audiences":     "Audiences are audience identifiers chosen by the authenticator that are compatible with both the TokenReview and token. An identifier is any identifier in the intersection of the TokenReviewSpec audiences and the token's audiences. A client of the TokenReview API that sets the spec.audiences field should validate that a compatible audience identifier is returned in the status.audiences field to ensure that the TokenReview server is audience aware. If a TokenReview returns an empty status.audience field where status.authenticated is \"true\", the token is valid against the audience of the Kubernetes API server.",
	"error":         "Error indicates that the token couldn't be checked",
}
12 vendor/k8s.io/api/authentication/v1/zz_generated.deepcopy.go generated vendored

@@ -141,7 +141,7 @@ func (in *TokenReview) DeepCopyInto(out *TokenReview) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	out.Spec = in.Spec
	in.Spec.DeepCopyInto(&out.Spec)
	in.Status.DeepCopyInto(&out.Status)
	return
}

@@ -167,6 +167,11 @@ func (in *TokenReview) DeepCopyObject() runtime.Object {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TokenReviewSpec) DeepCopyInto(out *TokenReviewSpec) {
	*out = *in
	if in.Audiences != nil {
		in, out := &in.Audiences, &out.Audiences
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	return
}

@@ -184,6 +189,11 @@ func (in *TokenReviewSpec) DeepCopy() *TokenReviewSpec {
func (in *TokenReviewStatus) DeepCopyInto(out *TokenReviewStatus) {
	*out = *in
	in.User.DeepCopyInto(&out.User)
	if in.Audiences != nil {
		in, out := &in.Audiences, &out.Audiences
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	return
}
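The switch in this hunk from `out.Spec = in.Spec` to `in.Spec.DeepCopyInto(&out.Spec)` is needed because TokenReviewSpec now carries a slice; a plain assignment would alias the Audiences backing array. A short sketch (not part of the diff) of the behaviour the deep copy provides:

package main

import (
	"fmt"

	authv1 "k8s.io/api/authentication/v1"
)

func main() {
	orig := &authv1.TokenReview{
		Spec: authv1.TokenReviewSpec{Audiences: []string{"aud-a"}},
	}

	// The generated code now clones the Audiences backing array, so editing
	// the copy leaves the original untouched.
	cp := orig.DeepCopy()
	cp.Spec.Audiences[0] = "aud-b"

	fmt.Println(orig.Spec.Audiences[0], cp.Spec.Audiences[0]) // aud-a aud-b
}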
1 vendor/k8s.io/api/authentication/v1beta1/doc.go generated vendored

@@ -17,4 +17,5 @@ limitations under the License.
// +k8s:deepcopy-gen=package
// +groupName=authentication.k8s.io
// +k8s:openapi-gen=true

package v1beta1 // import "k8s.io/api/authentication/v1beta1"
353 vendor/k8s.io/api/authentication/v1beta1/generated.pb.go generated vendored

@@ -14,9 +14,8 @@ See the License for the specific language governing permissions and
limitations under the License.
*/

// Code generated by protoc-gen-gogo.
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: k8s.io/kubernetes/vendor/k8s.io/api/authentication/v1beta1/generated.proto
// DO NOT EDIT!

/*
Package v1beta1 is a generated protocol buffer package.
@ -176,6 +175,21 @@ func (m *TokenReviewSpec) MarshalTo(dAtA []byte) (int, error) {
|
|||
i++
|
||||
i = encodeVarintGenerated(dAtA, i, uint64(len(m.Token)))
|
||||
i += copy(dAtA[i:], m.Token)
|
||||
if len(m.Audiences) > 0 {
|
||||
for _, s := range m.Audiences {
|
||||
dAtA[i] = 0x12
|
||||
i++
|
||||
l = len(s)
|
||||
for l >= 1<<7 {
|
||||
dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
|
||||
l >>= 7
|
||||
i++
|
||||
}
|
||||
dAtA[i] = uint8(l)
|
||||
i++
|
||||
i += copy(dAtA[i:], s)
|
||||
}
|
||||
}
|
||||
return i, nil
|
||||
}
|
||||
|
||||
|
@ -214,6 +228,21 @@ func (m *TokenReviewStatus) MarshalTo(dAtA []byte) (int, error) {
|
|||
i++
|
||||
i = encodeVarintGenerated(dAtA, i, uint64(len(m.Error)))
|
||||
i += copy(dAtA[i:], m.Error)
|
||||
if len(m.Audiences) > 0 {
|
||||
for _, s := range m.Audiences {
|
||||
dAtA[i] = 0x22
|
||||
i++
|
||||
l = len(s)
|
||||
for l >= 1<<7 {
|
||||
dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
|
||||
l >>= 7
|
||||
i++
|
||||
}
|
||||
dAtA[i] = uint8(l)
|
||||
i++
|
||||
i += copy(dAtA[i:], s)
|
||||
}
|
||||
}
|
||||
return i, nil
|
||||
}
|
||||
|
||||
|
@ -289,24 +318,6 @@ func (m *UserInfo) MarshalTo(dAtA []byte) (int, error) {
|
|||
return i, nil
|
||||
}
|
||||
|
||||
func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int {
|
||||
dAtA[offset] = uint8(v)
|
||||
dAtA[offset+1] = uint8(v >> 8)
|
||||
dAtA[offset+2] = uint8(v >> 16)
|
||||
dAtA[offset+3] = uint8(v >> 24)
|
||||
dAtA[offset+4] = uint8(v >> 32)
|
||||
dAtA[offset+5] = uint8(v >> 40)
|
||||
dAtA[offset+6] = uint8(v >> 48)
|
||||
dAtA[offset+7] = uint8(v >> 56)
|
||||
return offset + 8
|
||||
}
|
||||
func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int {
|
||||
dAtA[offset] = uint8(v)
|
||||
dAtA[offset+1] = uint8(v >> 8)
|
||||
dAtA[offset+2] = uint8(v >> 16)
|
||||
dAtA[offset+3] = uint8(v >> 24)
|
||||
return offset + 4
|
||||
}
|
||||
func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
|
||||
for v >= 1<<7 {
|
||||
dAtA[offset] = uint8(v&0x7f | 0x80)
|
||||
|
@ -345,6 +356,12 @@ func (m *TokenReviewSpec) Size() (n int) {
|
|||
_ = l
|
||||
l = len(m.Token)
|
||||
n += 1 + l + sovGenerated(uint64(l))
|
||||
if len(m.Audiences) > 0 {
|
||||
for _, s := range m.Audiences {
|
||||
l = len(s)
|
||||
n += 1 + l + sovGenerated(uint64(l))
|
||||
}
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
|
@ -356,6 +373,12 @@ func (m *TokenReviewStatus) Size() (n int) {
|
|||
n += 1 + l + sovGenerated(uint64(l))
|
||||
l = len(m.Error)
|
||||
n += 1 + l + sovGenerated(uint64(l))
|
||||
if len(m.Audiences) > 0 {
|
||||
for _, s := range m.Audiences {
|
||||
l = len(s)
|
||||
n += 1 + l + sovGenerated(uint64(l))
|
||||
}
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
|
@ -415,6 +438,7 @@ func (this *TokenReviewSpec) String() string {
|
|||
}
|
||||
s := strings.Join([]string{`&TokenReviewSpec{`,
|
||||
`Token:` + fmt.Sprintf("%v", this.Token) + `,`,
|
||||
`Audiences:` + fmt.Sprintf("%v", this.Audiences) + `,`,
|
||||
`}`,
|
||||
}, "")
|
||||
return s
|
||||
|
@ -427,6 +451,7 @@ func (this *TokenReviewStatus) String() string {
|
|||
`Authenticated:` + fmt.Sprintf("%v", this.Authenticated) + `,`,
|
||||
`User:` + strings.Replace(strings.Replace(this.User.String(), "UserInfo", "UserInfo", 1), `&`, ``, 1) + `,`,
|
||||
`Error:` + fmt.Sprintf("%v", this.Error) + `,`,
|
||||
`Audiences:` + fmt.Sprintf("%v", this.Audiences) + `,`,
|
||||
`}`,
|
||||
}, "")
|
||||
return s
|
||||
|
@ -739,6 +764,35 @@ func (m *TokenReviewSpec) Unmarshal(dAtA []byte) error {
|
|||
}
|
||||
m.Token = string(dAtA[iNdEx:postIndex])
|
||||
iNdEx = postIndex
|
||||
case 2:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Audiences", wireType)
|
||||
}
|
||||
var stringLen uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowGenerated
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
intStringLen := int(stringLen)
|
||||
if intStringLen < 0 {
|
||||
return ErrInvalidLengthGenerated
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.Audiences = append(m.Audiences, string(dAtA[iNdEx:postIndex]))
|
||||
iNdEx = postIndex
|
||||
default:
|
||||
iNdEx = preIndex
|
||||
skippy, err := skipGenerated(dAtA[iNdEx:])
|
||||
|
@ -868,6 +922,35 @@ func (m *TokenReviewStatus) Unmarshal(dAtA []byte) error {
|
|||
}
|
||||
m.Error = string(dAtA[iNdEx:postIndex])
|
||||
iNdEx = postIndex
|
||||
case 4:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Audiences", wireType)
|
||||
}
|
||||
var stringLen uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowGenerated
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
intStringLen := int(stringLen)
|
||||
if intStringLen < 0 {
|
||||
return ErrInvalidLengthGenerated
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.Audiences = append(m.Audiences, string(dAtA[iNdEx:postIndex]))
|
||||
iNdEx = postIndex
|
||||
default:
|
||||
iNdEx = preIndex
|
||||
skippy, err := skipGenerated(dAtA[iNdEx:])
|
||||
|
@ -1031,51 +1114,14 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error {
|
|||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
var keykey uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowGenerated
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
keykey |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
var stringLenmapkey uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowGenerated
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLenmapkey |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
intStringLenmapkey := int(stringLenmapkey)
|
||||
if intStringLenmapkey < 0 {
|
||||
return ErrInvalidLengthGenerated
|
||||
}
|
||||
postStringIndexmapkey := iNdEx + intStringLenmapkey
|
||||
if postStringIndexmapkey > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
mapkey := string(dAtA[iNdEx:postStringIndexmapkey])
|
||||
iNdEx = postStringIndexmapkey
|
||||
if m.Extra == nil {
|
||||
m.Extra = make(map[string]ExtraValue)
|
||||
}
|
||||
if iNdEx < postIndex {
|
||||
var valuekey uint64
|
||||
var mapkey string
|
||||
mapvalue := &ExtraValue{}
|
||||
for iNdEx < postIndex {
|
||||
entryPreIndex := iNdEx
|
||||
var wire uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowGenerated
|
||||
|
@ -1085,46 +1131,85 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error {
|
|||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
valuekey |= (uint64(b) & 0x7F) << shift
|
||||
wire |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
var mapmsglen int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowGenerated
|
||||
fieldNum := int32(wire >> 3)
|
||||
if fieldNum == 1 {
|
||||
var stringLenmapkey uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowGenerated
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLenmapkey |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if iNdEx >= l {
|
||||
intStringLenmapkey := int(stringLenmapkey)
|
||||
if intStringLenmapkey < 0 {
|
||||
return ErrInvalidLengthGenerated
|
||||
}
|
||||
postStringIndexmapkey := iNdEx + intStringLenmapkey
|
||||
if postStringIndexmapkey > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
mapmsglen |= (int(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
|
||||
iNdEx = postStringIndexmapkey
|
||||
} else if fieldNum == 2 {
|
||||
var mapmsglen int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowGenerated
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
mapmsglen |= (int(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if mapmsglen < 0 {
|
||||
return ErrInvalidLengthGenerated
|
||||
}
|
||||
postmsgIndex := iNdEx + mapmsglen
|
||||
if mapmsglen < 0 {
|
||||
return ErrInvalidLengthGenerated
|
||||
}
|
||||
if postmsgIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
mapvalue = &ExtraValue{}
|
||||
if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
|
||||
return err
|
||||
}
|
||||
iNdEx = postmsgIndex
|
||||
} else {
|
||||
iNdEx = entryPreIndex
|
||||
skippy, err := skipGenerated(dAtA[iNdEx:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if skippy < 0 {
|
||||
return ErrInvalidLengthGenerated
|
||||
}
|
||||
if (iNdEx + skippy) > postIndex {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
iNdEx += skippy
|
||||
}
|
||||
if mapmsglen < 0 {
|
||||
return ErrInvalidLengthGenerated
|
||||
}
|
||||
postmsgIndex := iNdEx + mapmsglen
|
||||
if mapmsglen < 0 {
|
||||
return ErrInvalidLengthGenerated
|
||||
}
|
||||
if postmsgIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
mapvalue := &ExtraValue{}
|
||||
if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
|
||||
return err
|
||||
}
|
||||
iNdEx = postmsgIndex
|
||||
m.Extra[mapkey] = *mapvalue
|
||||
} else {
|
||||
var mapvalue ExtraValue
|
||||
m.Extra[mapkey] = mapvalue
|
||||
}
|
||||
m.Extra[mapkey] = *mapvalue
|
||||
iNdEx = postIndex
|
||||
default:
|
||||
iNdEx = preIndex
|
||||
|
@ -1257,45 +1342,47 @@ func init() {
|
|||
}
|
||||
|
||||
var fileDescriptorGenerated = []byte{
|
||||
// 635 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x53, 0xcf, 0x4f, 0xd4, 0x40,
|
||||
0x14, 0x6e, 0xf7, 0x07, 0xee, 0xce, 0x8a, 0xe2, 0x24, 0x26, 0x9b, 0x4d, 0xec, 0xae, 0xeb, 0x85,
|
||||
0x44, 0x99, 0x0a, 0x21, 0x48, 0xf0, 0x64, 0x95, 0x18, 0x4c, 0x88, 0xc9, 0x08, 0x1e, 0xd4, 0x83,
|
||||
0xb3, 0xdd, 0x47, 0xb7, 0xae, 0xed, 0x34, 0xd3, 0x69, 0x95, 0x1b, 0x7f, 0x82, 0x47, 0x8f, 0x26,
|
||||
0xfe, 0x25, 0x26, 0x1e, 0x38, 0x72, 0xe4, 0x60, 0x88, 0xd4, 0x7f, 0xc4, 0xcc, 0x74, 0x64, 0x17,
|
||||
0x88, 0x01, 0x6e, 0xf3, 0xbe, 0xf7, 0xbe, 0x6f, 0xde, 0xf7, 0x66, 0x1e, 0x7a, 0x31, 0x5e, 0x4d,
|
||||
0x49, 0xc8, 0xdd, 0x71, 0x36, 0x00, 0x11, 0x83, 0x84, 0xd4, 0xcd, 0x21, 0x1e, 0x72, 0xe1, 0x9a,
|
||||
0x04, 0x4b, 0x42, 0x97, 0x65, 0x72, 0x04, 0xb1, 0x0c, 0x7d, 0x26, 0x43, 0x1e, 0xbb, 0xf9, 0xe2,
|
||||
0x00, 0x24, 0x5b, 0x74, 0x03, 0x88, 0x41, 0x30, 0x09, 0x43, 0x92, 0x08, 0x2e, 0x39, 0xbe, 0x5b,
|
||||
0x52, 0x08, 0x4b, 0x42, 0x72, 0x9a, 0x42, 0x0c, 0xa5, 0xb3, 0x10, 0x84, 0x72, 0x94, 0x0d, 0x88,
|
||||
0xcf, 0x23, 0x37, 0xe0, 0x01, 0x77, 0x35, 0x73, 0x90, 0xed, 0xe8, 0x48, 0x07, 0xfa, 0x54, 0x2a,
|
||||
0x76, 0x96, 0x27, 0x4d, 0x44, 0xcc, 0x1f, 0x85, 0x31, 0x88, 0x5d, 0x37, 0x19, 0x07, 0x0a, 0x48,
|
||||
0xdd, 0x08, 0x24, 0x73, 0xf3, 0x73, 0x7d, 0x74, 0xdc, 0xff, 0xb1, 0x44, 0x16, 0xcb, 0x30, 0x82,
0x73, 0x84, 0x95, 0x8b, 0x08, 0xa9, 0x3f, 0x82, 0x88, 0x9d, 0xe5, 0xf5, 0x1f, 0x21, 0xb4, 0xfe,
0x59, 0x0a, 0xf6, 0x9a, 0x7d, 0xcc, 0x00, 0x77, 0x51, 0x3d, 0x94, 0x10, 0xa5, 0x6d, 0xbb, 0x57,
0x9d, 0x6f, 0x7a, 0xcd, 0xe2, 0xa8, 0x5b, 0xdf, 0x50, 0x00, 0x2d, 0xf1, 0xb5, 0xc6, 0xd7, 0x6f,
0x5d, 0x6b, 0xef, 0x57, 0xcf, 0xea, 0x7f, 0xaf, 0xa0, 0xd6, 0x16, 0x1f, 0x43, 0x4c, 0x21, 0x0f,
0xe1, 0x13, 0x7e, 0x8f, 0x1a, 0xca, 0xcc, 0x90, 0x49, 0xd6, 0xb6, 0x7b, 0xf6, 0x7c, 0x6b, 0xe9,
0x21, 0x99, 0x0c, 0xf3, 0xa4, 0x27, 0x92, 0x8c, 0x03, 0x05, 0xa4, 0x44, 0x55, 0x93, 0x7c, 0x91,
0xbc, 0x1c, 0x7c, 0x00, 0x5f, 0x6e, 0x82, 0x64, 0x1e, 0xde, 0x3f, 0xea, 0x5a, 0xc5, 0x51, 0x17,
0x4d, 0x30, 0x7a, 0xa2, 0x8a, 0xb7, 0x50, 0x2d, 0x4d, 0xc0, 0x6f, 0x57, 0xb4, 0xfa, 0x12, 0xb9,
0xf0, 0xa9, 0xc8, 0x54, 0x7f, 0xaf, 0x12, 0xf0, 0xbd, 0xeb, 0x46, 0xbf, 0xa6, 0x22, 0xaa, 0xd5,
0xf0, 0x3b, 0x34, 0x93, 0x4a, 0x26, 0xb3, 0xb4, 0x5d, 0xd5, 0xba, 0xcb, 0x57, 0xd4, 0xd5, 0x5c,
0xef, 0x86, 0x51, 0x9e, 0x29, 0x63, 0x6a, 0x34, 0xfb, 0x2b, 0xe8, 0xe6, 0x99, 0x26, 0xf0, 0x3d,
0x54, 0x97, 0x0a, 0xd2, 0x53, 0x6a, 0x7a, 0xb3, 0x86, 0x59, 0x2f, 0xeb, 0xca, 0x5c, 0xff, 0xa7,
0x8d, 0x6e, 0x9d, 0xbb, 0x05, 0x3f, 0x46, 0xb3, 0x53, 0x1d, 0xc1, 0x50, 0x4b, 0x34, 0xbc, 0xdb,
0x46, 0x62, 0xf6, 0xc9, 0x74, 0x92, 0x9e, 0xae, 0xc5, 0x9b, 0xa8, 0x96, 0xa5, 0x20, 0xcc, 0xf8,
0xee, 0x5f, 0xc2, 0xe6, 0x76, 0x0a, 0x62, 0x23, 0xde, 0xe1, 0x93, 0xb9, 0x29, 0x84, 0x6a, 0x19,
0x65, 0x03, 0x84, 0xe0, 0x42, 0x8f, 0x6d, 0xca, 0xc6, 0xba, 0x02, 0x69, 0x99, 0xeb, 0xff, 0xa8,
0xa0, 0xc6, 0x3f, 0x15, 0xfc, 0x00, 0x35, 0x14, 0x33, 0x66, 0x11, 0x18, 0xef, 0x73, 0x86, 0xa4,
0x6b, 0x14, 0x4e, 0x4f, 0x2a, 0xf0, 0x1d, 0x54, 0xcd, 0xc2, 0xa1, 0xee, 0xb6, 0xe9, 0xb5, 0x4c,
0x61, 0x75, 0x7b, 0xe3, 0x19, 0x55, 0x38, 0xee, 0xa3, 0x99, 0x40, 0xf0, 0x2c, 0x51, 0xcf, 0xa6,
0xbe, 0x2a, 0x52, 0xc3, 0x7f, 0xae, 0x11, 0x6a, 0x32, 0xf8, 0x2d, 0xaa, 0x83, 0xfa, 0xdb, 0xed,
0x5a, 0xaf, 0x3a, 0xdf, 0x5a, 0x5a, 0xb9, 0x82, 0x65, 0xa2, 0x97, 0x62, 0x3d, 0x96, 0x62, 0x77,
0xca, 0x9a, 0xc2, 0x68, 0xa9, 0xd9, 0x09, 0xcc, 0xe2, 0xe8, 0x1a, 0x3c, 0x87, 0xaa, 0x63, 0xd8,
0x2d, 0x6d, 0x51, 0x75, 0xc4, 0x4f, 0x51, 0x3d, 0x57, 0x3b, 0x65, 0xe6, 0xbd, 0x70, 0x89, 0xcb,
0x27, 0x8b, 0x48, 0x4b, 0xee, 0x5a, 0x65, 0xd5, 0xf6, 0x16, 0xf6, 0x8f, 0x1d, 0xeb, 0xe0, 0xd8,
0xb1, 0x0e, 0x8f, 0x1d, 0x6b, 0xaf, 0x70, 0xec, 0xfd, 0xc2, 0xb1, 0x0f, 0x0a, 0xc7, 0x3e, 0x2c,
0x1c, 0xfb, 0x77, 0xe1, 0xd8, 0x5f, 0xfe, 0x38, 0xd6, 0x9b, 0x6b, 0x46, 0xe4, 0x6f, 0x00, 0x00,
0x00, 0xff, 0xff, 0x39, 0x00, 0xe7, 0xfa, 0x0e, 0x05, 0x00, 0x00,
// 663 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x53, 0xcd, 0x4e, 0x14, 0x4d,
0x14, 0xed, 0x9e, 0x1f, 0xbe, 0x99, 0x9a, 0x6f, 0x14, 0x2b, 0x31, 0x99, 0x4c, 0x62, 0x0f, 0x8e,
0x1b, 0x12, 0xa4, 0x5a, 0x08, 0x41, 0x82, 0x2b, 0x5a, 0x89, 0xc1, 0x84, 0x98, 0x94, 0xe0, 0x42,
0x5d, 0x58, 0xd3, 0x73, 0xe9, 0x69, 0xc7, 0xfe, 0x49, 0x55, 0xf5, 0x28, 0x3b, 0x1e, 0xc1, 0xa5,
0x4b, 0x13, 0x9f, 0xc4, 0x1d, 0x4b, 0x96, 0x2c, 0xcc, 0x44, 0xda, 0x27, 0xf0, 0x0d, 0x4c, 0x55,
0x17, 0xcc, 0x00, 0x31, 0xc0, 0xae, 0xeb, 0xdc, 0x7b, 0xce, 0x3d, 0xf7, 0x54, 0x17, 0x7a, 0x31,
0x5c, 0x13, 0x24, 0x4c, 0xdc, 0x61, 0xd6, 0x03, 0x1e, 0x83, 0x04, 0xe1, 0x8e, 0x20, 0xee, 0x27,
0xdc, 0x35, 0x05, 0x96, 0x86, 0x2e, 0xcb, 0xe4, 0x00, 0x62, 0x19, 0xfa, 0x4c, 0x86, 0x49, 0xec,
0x8e, 0x96, 0x7a, 0x20, 0xd9, 0x92, 0x1b, 0x40, 0x0c, 0x9c, 0x49, 0xe8, 0x93, 0x94, 0x27, 0x32,
0xc1, 0xf7, 0x0b, 0x0a, 0x61, 0x69, 0x48, 0xce, 0x53, 0x88, 0xa1, 0xb4, 0x17, 0x83, 0x50, 0x0e,
0xb2, 0x1e, 0xf1, 0x93, 0xc8, 0x0d, 0x92, 0x20, 0x71, 0x35, 0xb3, 0x97, 0xed, 0xe9, 0x93, 0x3e,
0xe8, 0xaf, 0x42, 0xb1, 0xbd, 0x32, 0x31, 0x11, 0x31, 0x7f, 0x10, 0xc6, 0xc0, 0xf7, 0xdd, 0x74,
0x18, 0x28, 0x40, 0xb8, 0x11, 0x48, 0xe6, 0x8e, 0x2e, 0xf9, 0x68, 0xbb, 0xff, 0x62, 0xf1, 0x2c,
0x96, 0x61, 0x04, 0x97, 0x08, 0xab, 0x57, 0x11, 0x84, 0x3f, 0x80, 0x88, 0x5d, 0xe4, 0x75, 0x1f,
0x23, 0xb4, 0xf9, 0x59, 0x72, 0xf6, 0x9a, 0x7d, 0xcc, 0x00, 0x77, 0x50, 0x35, 0x94, 0x10, 0x89,
0x96, 0x3d, 0x57, 0x9e, 0xaf, 0x7b, 0xf5, 0x7c, 0xdc, 0xa9, 0x6e, 0x29, 0x80, 0x16, 0xf8, 0x7a,
0xed, 0xeb, 0xb7, 0x8e, 0x75, 0xf0, 0x73, 0xce, 0xea, 0x7e, 0x2f, 0xa1, 0xc6, 0x4e, 0x32, 0x84,
0x98, 0xc2, 0x28, 0x84, 0x4f, 0xf8, 0x3d, 0xaa, 0xa9, 0x65, 0xfa, 0x4c, 0xb2, 0x96, 0x3d, 0x67,
0xcf, 0x37, 0x96, 0x1f, 0x91, 0x49, 0x98, 0x67, 0x9e, 0x48, 0x3a, 0x0c, 0x14, 0x20, 0x88, 0xea,
0x26, 0xa3, 0x25, 0xf2, 0xb2, 0xf7, 0x01, 0x7c, 0xb9, 0x0d, 0x92, 0x79, 0xf8, 0x70, 0xdc, 0xb1,
0xf2, 0x71, 0x07, 0x4d, 0x30, 0x7a, 0xa6, 0x8a, 0x77, 0x50, 0x45, 0xa4, 0xe0, 0xb7, 0x4a, 0x5a,
0x7d, 0x99, 0x5c, 0x79, 0x55, 0x64, 0xca, 0xdf, 0xab, 0x14, 0x7c, 0xef, 0x7f, 0xa3, 0x5f, 0x51,
0x27, 0xaa, 0xd5, 0xf0, 0x3b, 0x34, 0x23, 0x24, 0x93, 0x99, 0x68, 0x95, 0xb5, 0xee, 0xca, 0x0d,
0x75, 0x35, 0xd7, 0xbb, 0x65, 0x94, 0x67, 0x8a, 0x33, 0x35, 0x9a, 0x5d, 0x1f, 0xdd, 0xbe, 0x60,
0x02, 0x3f, 0x40, 0x55, 0xa9, 0x20, 0x9d, 0x52, 0xdd, 0x6b, 0x1a, 0x66, 0xb5, 0xe8, 0x2b, 0x6a,
0x78, 0x01, 0xd5, 0x59, 0xd6, 0x0f, 0x21, 0xf6, 0x41, 0xb4, 0x4a, 0xfa, 0x32, 0x9a, 0xf9, 0xb8,
0x53, 0xdf, 0x38, 0x05, 0xe9, 0xa4, 0xde, 0xfd, 0x63, 0xa3, 0x3b, 0x97, 0x2c, 0xe1, 0x27, 0xa8,
0x39, 0x65, 0x1f, 0xfa, 0x7a, 0x5e, 0xcd, 0xbb, 0x6b, 0xe6, 0x35, 0x37, 0xa6, 0x8b, 0xf4, 0x7c,
0x2f, 0xde, 0x46, 0x95, 0x4c, 0x00, 0x37, 0x59, 0x2f, 0x5c, 0x23, 0x93, 0x5d, 0x01, 0x7c, 0x2b,
0xde, 0x4b, 0x26, 0x21, 0x2b, 0x84, 0x6a, 0x19, 0xb5, 0x33, 0x70, 0x9e, 0x70, 0x9d, 0xf1, 0xd4,
0xce, 0x9b, 0x0a, 0xa4, 0x45, 0xed, 0xfc, 0xce, 0x95, 0x2b, 0x76, 0xfe, 0x51, 0x42, 0xb5, 0xd3,
0x91, 0xf8, 0x21, 0xaa, 0xa9, 0x31, 0x31, 0x8b, 0xc0, 0xa4, 0x3a, 0x6b, 0x26, 0xe8, 0x1e, 0x85,
0xd3, 0xb3, 0x0e, 0x7c, 0x0f, 0x95, 0xb3, 0xb0, 0xaf, 0x57, 0xab, 0x7b, 0x0d, 0xd3, 0x58, 0xde,
0xdd, 0x7a, 0x46, 0x15, 0x8e, 0xbb, 0x68, 0x26, 0xe0, 0x49, 0x96, 0xaa, 0x1f, 0x42, 0x79, 0x40,
0xea, 0x5a, 0x9f, 0x6b, 0x84, 0x9a, 0x0a, 0x7e, 0x8b, 0xaa, 0xa0, 0x5e, 0x8d, 0xb6, 0xd9, 0x58,
0x5e, 0xbd, 0x41, 0x3e, 0x44, 0x3f, 0xb7, 0xcd, 0x58, 0xf2, 0xfd, 0xa9, 0x1c, 0x14, 0x46, 0x0b,
0xcd, 0x76, 0x60, 0x9e, 0xa4, 0xee, 0xc1, 0xb3, 0xa8, 0x3c, 0x84, 0xfd, 0x62, 0x2d, 0xaa, 0x3e,
0xf1, 0x53, 0x54, 0x1d, 0xa9, 0xd7, 0x6a, 0x2e, 0x67, 0xf1, 0x1a, 0xc3, 0x27, 0x4f, 0x9c, 0x16,
0xdc, 0xf5, 0xd2, 0x9a, 0xed, 0x2d, 0x1e, 0x9e, 0x38, 0xd6, 0xd1, 0x89, 0x63, 0x1d, 0x9f, 0x38,
0xd6, 0x41, 0xee, 0xd8, 0x87, 0xb9, 0x63, 0x1f, 0xe5, 0x8e, 0x7d, 0x9c, 0x3b, 0xf6, 0xaf, 0xdc,
0xb1, 0xbf, 0xfc, 0x76, 0xac, 0x37, 0xff, 0x19, 0x91, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, 0xf7,
0xd6, 0x32, 0x28, 0x68, 0x05, 0x00, 0x00,
}
20 vendor/k8s.io/api/authentication/v1beta1/generated.proto (generated, vendored)
@@ -57,6 +57,14 @@ message TokenReviewSpec {
// Token is the opaque bearer token.
// +optional
optional string token = 1;

// Audiences is a list of the identifiers that the resource server presented
// with the token identifies as. Audience-aware token authenticators will
// verify that the token was intended for at least one of the audiences in
// this list. If no audiences are provided, the audience will default to the
// audience of the Kubernetes apiserver.
// +optional
repeated string audiences = 2;
}

// TokenReviewStatus is the result of the token authentication request.
@@ -69,6 +77,18 @@ message TokenReviewStatus {
// +optional
optional UserInfo user = 2;

// Audiences are audience identifiers chosen by the authenticator that are
// compatible with both the TokenReview and token. An identifier is any
// identifier in the intersection of the TokenReviewSpec audiences and the
// token's audiences. A client of the TokenReview API that sets the
// spec.audiences field should validate that a compatible audience identifier
// is returned in the status.audiences field to ensure that the TokenReview
// server is audience aware. If a TokenReview returns an empty
// status.audience field where status.authenticated is "true", the token is
// valid against the audience of the Kubernetes API server.
// +optional
repeated string audiences = 4;

// Error indicates that the token couldn't be checked
// +optional
optional string error = 3;
18 vendor/k8s.io/api/authentication/v1beta1/types.go (generated, vendored)
@@ -48,6 +48,13 @@ type TokenReviewSpec struct {
// Token is the opaque bearer token.
// +optional
Token string `json:"token,omitempty" protobuf:"bytes,1,opt,name=token"`
// Audiences is a list of the identifiers that the resource server presented
// with the token identifies as. Audience-aware token authenticators will
// verify that the token was intended for at least one of the audiences in
// this list. If no audiences are provided, the audience will default to the
// audience of the Kubernetes apiserver.
// +optional
Audiences []string `json:"audiences,omitempty" protobuf:"bytes,2,rep,name=audiences"`
}

// TokenReviewStatus is the result of the token authentication request.
@@ -58,6 +65,17 @@ type TokenReviewStatus struct {
// User is the UserInfo associated with the provided token.
// +optional
User UserInfo `json:"user,omitempty" protobuf:"bytes,2,opt,name=user"`
// Audiences are audience identifiers chosen by the authenticator that are
// compatible with both the TokenReview and token. An identifier is any
// identifier in the intersection of the TokenReviewSpec audiences and the
// token's audiences. A client of the TokenReview API that sets the
// spec.audiences field should validate that a compatible audience identifier
// is returned in the status.audiences field to ensure that the TokenReview
// server is audience aware. If a TokenReview returns an empty
// status.audience field where status.authenticated is "true", the token is
// valid against the audience of the Kubernetes API server.
// +optional
Audiences []string `json:"audiences,omitempty" protobuf:"bytes,4,rep,name=audiences"`
// Error indicates that the token couldn't be checked
// +optional
Error string `json:"error,omitempty" protobuf:"bytes,3,opt,name=error"`
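Note on the audiences fields added above: the sketch below shows the client-side check the field documentation asks for, using only the vendored k8s.io/api/authentication/v1beta1 types from this diff. The audience URL and the "treat an empty status.audiences as not confirmed" policy are illustrative assumptions, and submitting the TokenReview to the API server (for example via a client-go clientset) is outside the scope of this change.

```go
package main

import (
	"fmt"

	authv1beta1 "k8s.io/api/authentication/v1beta1"
)

// audienceConfirmed reports whether the authenticator confirmed the token for
// the audience this resource server presented in spec.audiences. Per the field
// documentation above, an empty status.audiences with authenticated=true only
// proves validity against the API server's own audience, so it is treated as
// "not confirmed" here (a conservative, assumed policy).
func audienceConfirmed(status authv1beta1.TokenReviewStatus, want string) bool {
	if !status.Authenticated {
		return false
	}
	for _, aud := range status.Audiences {
		if aud == want {
			return true
		}
	}
	return false
}

func main() {
	review := authv1beta1.TokenReview{
		Spec: authv1beta1.TokenReviewSpec{
			Token:     "<opaque bearer token>",
			Audiences: []string{"https://svc.example.com"}, // hypothetical audience
		},
	}
	// review.Status would be filled in by the authenticator after the object
	// is submitted to the TokenReview API (not shown here).
	fmt.Println(audienceConfirmed(review.Status, "https://svc.example.com"))
}
```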
6 vendor/k8s.io/api/authentication/v1beta1/types_swagger_doc_generated.go (generated, vendored)
@@ -38,8 +38,9 @@ func (TokenReview) SwaggerDoc() map[string]string {
}

var map_TokenReviewSpec = map[string]string{
"": "TokenReviewSpec is a description of the token authentication request.",
"token": "Token is the opaque bearer token.",
"": "TokenReviewSpec is a description of the token authentication request.",
"token": "Token is the opaque bearer token.",
"audiences": "Audiences is a list of the identifiers that the resource server presented with the token identifies as. Audience-aware token authenticators will verify that the token was intended for at least one of the audiences in this list. If no audiences are provided, the audience will default to the audience of the Kubernetes apiserver.",
}

func (TokenReviewSpec) SwaggerDoc() map[string]string {
@@ -50,6 +51,7 @@ var map_TokenReviewStatus = map[string]string{
"": "TokenReviewStatus is the result of the token authentication request.",
"authenticated": "Authenticated indicates that the token was associated with a known user.",
"user": "User is the UserInfo associated with the provided token.",
"audiences": "Audiences are audience identifiers chosen by the authenticator that are compatible with both the TokenReview and token. An identifier is any identifier in the intersection of the TokenReviewSpec audiences and the token's audiences. A client of the TokenReview API that sets the spec.audiences field should validate that a compatible audience identifier is returned in the status.audiences field to ensure that the TokenReview server is audience aware. If a TokenReview returns an empty status.audience field where status.authenticated is \"true\", the token is valid against the audience of the Kubernetes API server.",
"error": "Error indicates that the token couldn't be checked",
}
12 vendor/k8s.io/api/authentication/v1beta1/zz_generated.deepcopy.go (generated, vendored)
@@ -49,7 +49,7 @@ func (in *TokenReview) DeepCopyInto(out *TokenReview) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
out.Spec = in.Spec
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
return
}
@@ -75,6 +75,11 @@ func (in *TokenReview) DeepCopyObject() runtime.Object {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *TokenReviewSpec) DeepCopyInto(out *TokenReviewSpec) {
*out = *in
if in.Audiences != nil {
in, out := &in.Audiences, &out.Audiences
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}

@@ -92,6 +97,11 @@ func (in *TokenReviewSpec) DeepCopy() *TokenReviewSpec {
func (in *TokenReviewStatus) DeepCopyInto(out *TokenReviewStatus) {
*out = *in
in.User.DeepCopyInto(&out.User)
if in.Audiences != nil {
in, out := &in.Audiences, &out.Audiences
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}
1 vendor/k8s.io/api/authorization/v1/doc.go (generated, vendored)
@@ -18,4 +18,5 @@ limitations under the License.
// +k8s:openapi-gen=true

// +groupName=authorization.k8s.io

package v1 // import "k8s.io/api/authorization/v1"
167 vendor/k8s.io/api/authorization/v1/generated.pb.go (generated, vendored)
|
@ -14,9 +14,8 @@ See the License for the specific language governing permissions and
|
|||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Code generated by protoc-gen-gogo.
|
||||
// Code generated by protoc-gen-gogo. DO NOT EDIT.
|
||||
// source: k8s.io/kubernetes/vendor/k8s.io/api/authorization/v1/generated.proto
|
||||
// DO NOT EDIT!
|
||||
|
||||
/*
|
||||
Package v1 is a generated protocol buffer package.
|
||||
|
@ -795,24 +794,6 @@ func (m *SubjectRulesReviewStatus) MarshalTo(dAtA []byte) (int, error) {
|
|||
return i, nil
|
||||
}
|
||||
|
||||
func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int {
|
||||
dAtA[offset] = uint8(v)
|
||||
dAtA[offset+1] = uint8(v >> 8)
|
||||
dAtA[offset+2] = uint8(v >> 16)
|
||||
dAtA[offset+3] = uint8(v >> 24)
|
||||
dAtA[offset+4] = uint8(v >> 32)
|
||||
dAtA[offset+5] = uint8(v >> 40)
|
||||
dAtA[offset+6] = uint8(v >> 48)
|
||||
dAtA[offset+7] = uint8(v >> 56)
|
||||
return offset + 8
|
||||
}
|
||||
func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int {
|
||||
dAtA[offset] = uint8(v)
|
||||
dAtA[offset+1] = uint8(v >> 8)
|
||||
dAtA[offset+2] = uint8(v >> 16)
|
||||
dAtA[offset+3] = uint8(v >> 24)
|
||||
return offset + 4
|
||||
}
|
||||
func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
|
||||
for v >= 1<<7 {
|
||||
dAtA[offset] = uint8(v&0x7f | 0x80)
|
||||
|
@ -2888,51 +2869,14 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error {
|
|||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
var keykey uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowGenerated
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
keykey |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
var stringLenmapkey uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowGenerated
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLenmapkey |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
intStringLenmapkey := int(stringLenmapkey)
|
||||
if intStringLenmapkey < 0 {
|
||||
return ErrInvalidLengthGenerated
|
||||
}
|
||||
postStringIndexmapkey := iNdEx + intStringLenmapkey
|
||||
if postStringIndexmapkey > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
mapkey := string(dAtA[iNdEx:postStringIndexmapkey])
|
||||
iNdEx = postStringIndexmapkey
|
||||
if m.Extra == nil {
|
||||
m.Extra = make(map[string]ExtraValue)
|
||||
}
|
||||
if iNdEx < postIndex {
|
||||
var valuekey uint64
|
||||
var mapkey string
|
||||
mapvalue := &ExtraValue{}
|
||||
for iNdEx < postIndex {
|
||||
entryPreIndex := iNdEx
|
||||
var wire uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowGenerated
|
||||
|
@ -2942,46 +2886,85 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error {
|
|||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
valuekey |= (uint64(b) & 0x7F) << shift
|
||||
wire |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
var mapmsglen int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowGenerated
|
||||
fieldNum := int32(wire >> 3)
|
||||
if fieldNum == 1 {
|
||||
var stringLenmapkey uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowGenerated
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLenmapkey |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if iNdEx >= l {
|
||||
intStringLenmapkey := int(stringLenmapkey)
|
||||
if intStringLenmapkey < 0 {
|
||||
return ErrInvalidLengthGenerated
|
||||
}
|
||||
postStringIndexmapkey := iNdEx + intStringLenmapkey
|
||||
if postStringIndexmapkey > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
mapmsglen |= (int(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
|
||||
iNdEx = postStringIndexmapkey
|
||||
} else if fieldNum == 2 {
|
||||
var mapmsglen int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowGenerated
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
mapmsglen |= (int(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if mapmsglen < 0 {
|
||||
return ErrInvalidLengthGenerated
|
||||
}
|
||||
postmsgIndex := iNdEx + mapmsglen
|
||||
if mapmsglen < 0 {
|
||||
return ErrInvalidLengthGenerated
|
||||
}
|
||||
if postmsgIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
mapvalue = &ExtraValue{}
|
||||
if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
|
||||
return err
|
||||
}
|
||||
iNdEx = postmsgIndex
|
||||
} else {
|
||||
iNdEx = entryPreIndex
|
||||
skippy, err := skipGenerated(dAtA[iNdEx:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if skippy < 0 {
|
||||
return ErrInvalidLengthGenerated
|
||||
}
|
||||
if (iNdEx + skippy) > postIndex {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
iNdEx += skippy
|
||||
}
|
||||
if mapmsglen < 0 {
|
||||
return ErrInvalidLengthGenerated
|
||||
}
|
||||
postmsgIndex := iNdEx + mapmsglen
|
||||
if mapmsglen < 0 {
|
||||
return ErrInvalidLengthGenerated
|
||||
}
|
||||
if postmsgIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
mapvalue := &ExtraValue{}
|
||||
if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
|
||||
return err
|
||||
}
|
||||
iNdEx = postmsgIndex
|
||||
m.Extra[mapkey] = *mapvalue
|
||||
} else {
|
||||
var mapvalue ExtraValue
|
||||
m.Extra[mapkey] = mapvalue
|
||||
}
|
||||
m.Extra[mapkey] = *mapvalue
|
||||
iNdEx = postIndex
|
||||
case 6:
|
||||
if wireType != 2 {
|
||||
1 vendor/k8s.io/api/authorization/v1beta1/doc.go (generated, vendored)
@@ -18,4 +18,5 @@ limitations under the License.
// +k8s:openapi-gen=true

// +groupName=authorization.k8s.io

package v1beta1 // import "k8s.io/api/authorization/v1beta1"
167 vendor/k8s.io/api/authorization/v1beta1/generated.pb.go (generated, vendored)
|
@ -14,9 +14,8 @@ See the License for the specific language governing permissions and
|
|||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Code generated by protoc-gen-gogo.
|
||||
// Code generated by protoc-gen-gogo. DO NOT EDIT.
|
||||
// source: k8s.io/kubernetes/vendor/k8s.io/api/authorization/v1beta1/generated.proto
|
||||
// DO NOT EDIT!
|
||||
|
||||
/*
|
||||
Package v1beta1 is a generated protocol buffer package.
|
||||
|
@ -795,24 +794,6 @@ func (m *SubjectRulesReviewStatus) MarshalTo(dAtA []byte) (int, error) {
|
|||
return i, nil
|
||||
}
|
||||
|
||||
func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int {
|
||||
dAtA[offset] = uint8(v)
|
||||
dAtA[offset+1] = uint8(v >> 8)
|
||||
dAtA[offset+2] = uint8(v >> 16)
|
||||
dAtA[offset+3] = uint8(v >> 24)
|
||||
dAtA[offset+4] = uint8(v >> 32)
|
||||
dAtA[offset+5] = uint8(v >> 40)
|
||||
dAtA[offset+6] = uint8(v >> 48)
|
||||
dAtA[offset+7] = uint8(v >> 56)
|
||||
return offset + 8
|
||||
}
|
||||
func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int {
|
||||
dAtA[offset] = uint8(v)
|
||||
dAtA[offset+1] = uint8(v >> 8)
|
||||
dAtA[offset+2] = uint8(v >> 16)
|
||||
dAtA[offset+3] = uint8(v >> 24)
|
||||
return offset + 4
|
||||
}
|
||||
func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
|
||||
for v >= 1<<7 {
|
||||
dAtA[offset] = uint8(v&0x7f | 0x80)
|
||||
|
@ -2888,51 +2869,14 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error {
|
|||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
var keykey uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowGenerated
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
keykey |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
var stringLenmapkey uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowGenerated
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLenmapkey |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
intStringLenmapkey := int(stringLenmapkey)
|
||||
if intStringLenmapkey < 0 {
|
||||
return ErrInvalidLengthGenerated
|
||||
}
|
||||
postStringIndexmapkey := iNdEx + intStringLenmapkey
|
||||
if postStringIndexmapkey > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
mapkey := string(dAtA[iNdEx:postStringIndexmapkey])
|
||||
iNdEx = postStringIndexmapkey
|
||||
if m.Extra == nil {
|
||||
m.Extra = make(map[string]ExtraValue)
|
||||
}
|
||||
if iNdEx < postIndex {
|
||||
var valuekey uint64
|
||||
var mapkey string
|
||||
mapvalue := &ExtraValue{}
|
||||
for iNdEx < postIndex {
|
||||
entryPreIndex := iNdEx
|
||||
var wire uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowGenerated
|
||||
|
@ -2942,46 +2886,85 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error {
|
|||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
valuekey |= (uint64(b) & 0x7F) << shift
|
||||
wire |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
var mapmsglen int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowGenerated
|
||||
fieldNum := int32(wire >> 3)
|
||||
if fieldNum == 1 {
|
||||
var stringLenmapkey uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowGenerated
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLenmapkey |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if iNdEx >= l {
|
||||
intStringLenmapkey := int(stringLenmapkey)
|
||||
if intStringLenmapkey < 0 {
|
||||
return ErrInvalidLengthGenerated
|
||||
}
|
||||
postStringIndexmapkey := iNdEx + intStringLenmapkey
|
||||
if postStringIndexmapkey > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
mapmsglen |= (int(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
|
||||
iNdEx = postStringIndexmapkey
|
||||
} else if fieldNum == 2 {
|
||||
var mapmsglen int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowGenerated
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
mapmsglen |= (int(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if mapmsglen < 0 {
|
||||
return ErrInvalidLengthGenerated
|
||||
}
|
||||
postmsgIndex := iNdEx + mapmsglen
|
||||
if mapmsglen < 0 {
|
||||
return ErrInvalidLengthGenerated
|
||||
}
|
||||
if postmsgIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
mapvalue = &ExtraValue{}
|
||||
if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
|
||||
return err
|
||||
}
|
||||
iNdEx = postmsgIndex
|
||||
} else {
|
||||
iNdEx = entryPreIndex
|
||||
skippy, err := skipGenerated(dAtA[iNdEx:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if skippy < 0 {
|
||||
return ErrInvalidLengthGenerated
|
||||
}
|
||||
if (iNdEx + skippy) > postIndex {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
iNdEx += skippy
|
||||
}
|
||||
if mapmsglen < 0 {
|
||||
return ErrInvalidLengthGenerated
|
||||
}
|
||||
postmsgIndex := iNdEx + mapmsglen
|
||||
if mapmsglen < 0 {
|
||||
return ErrInvalidLengthGenerated
|
||||
}
|
||||
if postmsgIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
mapvalue := &ExtraValue{}
|
||||
if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
|
||||
return err
|
||||
}
|
||||
iNdEx = postmsgIndex
|
||||
m.Extra[mapkey] = *mapvalue
|
||||
} else {
|
||||
var mapvalue ExtraValue
|
||||
m.Extra[mapkey] = mapvalue
|
||||
}
|
||||
m.Extra[mapkey] = *mapvalue
|
||||
iNdEx = postIndex
|
||||
case 6:
|
||||
if wireType != 2 {
|
||||
21 vendor/k8s.io/api/autoscaling/v1/generated.pb.go (generated, vendored)
|
@ -14,9 +14,8 @@ See the License for the specific language governing permissions and
|
|||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Code generated by protoc-gen-gogo.
|
||||
// Code generated by protoc-gen-gogo. DO NOT EDIT.
|
||||
// source: k8s.io/kubernetes/vendor/k8s.io/api/autoscaling/v1/generated.proto
|
||||
// DO NOT EDIT!
|
||||
|
||||
/*
|
||||
Package v1 is a generated protocol buffer package.
|
||||
|
@ -996,24 +995,6 @@ func (m *ScaleStatus) MarshalTo(dAtA []byte) (int, error) {
|
|||
return i, nil
|
||||
}
|
||||
|
||||
func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int {
|
||||
dAtA[offset] = uint8(v)
|
||||
dAtA[offset+1] = uint8(v >> 8)
|
||||
dAtA[offset+2] = uint8(v >> 16)
|
||||
dAtA[offset+3] = uint8(v >> 24)
|
||||
dAtA[offset+4] = uint8(v >> 32)
|
||||
dAtA[offset+5] = uint8(v >> 40)
|
||||
dAtA[offset+6] = uint8(v >> 48)
|
||||
dAtA[offset+7] = uint8(v >> 56)
|
||||
return offset + 8
|
||||
}
|
||||
func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int {
|
||||
dAtA[offset] = uint8(v)
|
||||
dAtA[offset+1] = uint8(v >> 8)
|
||||
dAtA[offset+2] = uint8(v >> 16)
|
||||
dAtA[offset+3] = uint8(v >> 24)
|
||||
return offset + 4
|
||||
}
|
||||
func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
|
||||
for v >= 1<<7 {
|
||||
dAtA[offset] = uint8(v&0x7f | 0x80)
|
||||
8 vendor/k8s.io/api/autoscaling/v1/types_swagger_doc_generated.go (generated, vendored)
|
@ -196,8 +196,8 @@ func (PodsMetricStatus) SwaggerDoc() map[string]string {
|
|||
}
|
||||
|
||||
var map_ResourceMetricSource = map[string]string{
|
||||
"": "ResourceMetricSource indicates how to scale on a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). The values will be averaged together before being compared to the target. Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source. Only one \"target\" type should be set.",
|
||||
"name": "name is the name of the resource in question.",
|
||||
"": "ResourceMetricSource indicates how to scale on a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). The values will be averaged together before being compared to the target. Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source. Only one \"target\" type should be set.",
|
||||
"name": "name is the name of the resource in question.",
|
||||
"targetAverageUtilization": "targetAverageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods.",
|
||||
"targetAverageValue": "targetAverageValue is the target value of the average of the resource metric across all relevant pods, as a raw value (instead of as a percentage of the request), similar to the \"pods\" metric source type.",
|
||||
}
|
||||
|
@ -207,8 +207,8 @@ func (ResourceMetricSource) SwaggerDoc() map[string]string {
|
|||
}
|
||||
|
||||
var map_ResourceMetricStatus = map[string]string{
|
||||
"": "ResourceMetricStatus indicates the current value of a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.",
|
||||
"name": "name is the name of the resource in question.",
|
||||
"": "ResourceMetricStatus indicates the current value of a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.",
|
||||
"name": "name is the name of the resource in question.",
|
||||
"currentAverageUtilization": "currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. It will only be present if `targetAverageValue` was set in the corresponding metric specification.",
|
||||
"currentAverageValue": "currentAverageValue is the current value of the average of the resource metric across all relevant pods, as a raw value (instead of as a percentage of the request), similar to the \"pods\" metric source type. It will always be set, regardless of the corresponding metric specification.",
|
||||
}
|
||||
21 vendor/k8s.io/api/autoscaling/v2beta1/generated.pb.go (generated, vendored)
|
@ -14,9 +14,8 @@ See the License for the specific language governing permissions and
|
|||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Code generated by protoc-gen-gogo.
|
||||
// Code generated by protoc-gen-gogo. DO NOT EDIT.
|
||||
// source: k8s.io/kubernetes/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto
|
||||
// DO NOT EDIT!
|
||||
|
||||
/*
|
||||
Package v2beta1 is a generated protocol buffer package.
|
||||
|
@ -916,24 +915,6 @@ func (m *ResourceMetricStatus) MarshalTo(dAtA []byte) (int, error) {
|
|||
return i, nil
|
||||
}
|
||||
|
||||
func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int {
|
||||
dAtA[offset] = uint8(v)
|
||||
dAtA[offset+1] = uint8(v >> 8)
|
||||
dAtA[offset+2] = uint8(v >> 16)
|
||||
dAtA[offset+3] = uint8(v >> 24)
|
||||
dAtA[offset+4] = uint8(v >> 32)
|
||||
dAtA[offset+5] = uint8(v >> 40)
|
||||
dAtA[offset+6] = uint8(v >> 48)
|
||||
dAtA[offset+7] = uint8(v >> 56)
|
||||
return offset + 8
|
||||
}
|
||||
func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int {
|
||||
dAtA[offset] = uint8(v)
|
||||
dAtA[offset+1] = uint8(v >> 8)
|
||||
dAtA[offset+2] = uint8(v >> 16)
|
||||
dAtA[offset+3] = uint8(v >> 24)
|
||||
return offset + 4
|
||||
}
|
||||
func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
|
||||
for v >= 1<<7 {
|
||||
dAtA[offset] = uint8(v&0x7f | 0x80)
|
||||
8 vendor/k8s.io/api/autoscaling/v2beta1/types_swagger_doc_generated.go (generated, vendored)
|
@ -197,8 +197,8 @@ func (PodsMetricStatus) SwaggerDoc() map[string]string {
|
|||
}
|
||||
|
||||
var map_ResourceMetricSource = map[string]string{
|
||||
"": "ResourceMetricSource indicates how to scale on a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). The values will be averaged together before being compared to the target. Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source. Only one \"target\" type should be set.",
|
||||
"name": "name is the name of the resource in question.",
|
||||
"": "ResourceMetricSource indicates how to scale on a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). The values will be averaged together before being compared to the target. Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source. Only one \"target\" type should be set.",
|
||||
"name": "name is the name of the resource in question.",
|
||||
"targetAverageUtilization": "targetAverageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods.",
|
||||
"targetAverageValue": "targetAverageValue is the target value of the average of the resource metric across all relevant pods, as a raw value (instead of as a percentage of the request), similar to the \"pods\" metric source type.",
|
||||
}
|
||||
|
@ -208,8 +208,8 @@ func (ResourceMetricSource) SwaggerDoc() map[string]string {
|
|||
}
|
||||
|
||||
var map_ResourceMetricStatus = map[string]string{
|
||||
"": "ResourceMetricStatus indicates the current value of a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.",
|
||||
"name": "name is the name of the resource in question.",
|
||||
"": "ResourceMetricStatus indicates the current value of a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.",
|
||||
"name": "name is the name of the resource in question.",
|
||||
"currentAverageUtilization": "currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. It will only be present if `targetAverageValue` was set in the corresponding metric specification.",
|
||||
"currentAverageValue": "currentAverageValue is the current value of the average of the resource metric across all relevant pods, as a raw value (instead of as a percentage of the request), similar to the \"pods\" metric source type. It will always be set, regardless of the corresponding metric specification.",
|
||||
}
|
||||
21 vendor/k8s.io/api/autoscaling/v2beta2/generated.pb.go (generated, vendored)
|
@ -14,9 +14,8 @@ See the License for the specific language governing permissions and
|
|||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Code generated by protoc-gen-gogo.
|
||||
// Code generated by protoc-gen-gogo. DO NOT EDIT.
|
||||
// source: k8s.io/kubernetes/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto
|
||||
// DO NOT EDIT!
|
||||
|
||||
/*
|
||||
Package v2beta2 is a generated protocol buffer package.
|
||||
|
@ -966,24 +965,6 @@ func (m *ResourceMetricStatus) MarshalTo(dAtA []byte) (int, error) {
|
|||
return i, nil
|
||||
}
|
||||
|
||||
func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int {
|
||||
dAtA[offset] = uint8(v)
|
||||
dAtA[offset+1] = uint8(v >> 8)
|
||||
dAtA[offset+2] = uint8(v >> 16)
|
||||
dAtA[offset+3] = uint8(v >> 24)
|
||||
dAtA[offset+4] = uint8(v >> 32)
|
||||
dAtA[offset+5] = uint8(v >> 40)
|
||||
dAtA[offset+6] = uint8(v >> 48)
|
||||
dAtA[offset+7] = uint8(v >> 56)
|
||||
return offset + 8
|
||||
}
|
||||
func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int {
|
||||
dAtA[offset] = uint8(v)
|
||||
dAtA[offset+1] = uint8(v >> 8)
|
||||
dAtA[offset+2] = uint8(v >> 16)
|
||||
dAtA[offset+3] = uint8(v >> 24)
|
||||
return offset + 4
|
||||
}
|
||||
func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
|
||||
for v >= 1<<7 {
|
||||
dAtA[offset] = uint8(v&0x7f | 0x80)
|
||||
21 vendor/k8s.io/api/batch/v1/generated.pb.go (generated, vendored)
|
@ -14,9 +14,8 @@ See the License for the specific language governing permissions and
|
|||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Code generated by protoc-gen-gogo.
|
||||
// Code generated by protoc-gen-gogo. DO NOT EDIT.
|
||||
// source: k8s.io/kubernetes/vendor/k8s.io/api/batch/v1/generated.proto
|
||||
// DO NOT EDIT!
|
||||
|
||||
/*
|
||||
Package v1 is a generated protocol buffer package.
|
||||
|
@ -343,24 +342,6 @@ func (m *JobStatus) MarshalTo(dAtA []byte) (int, error) {
|
|||
return i, nil
|
||||
}
|
||||
|
||||
func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int {
|
||||
dAtA[offset] = uint8(v)
|
||||
dAtA[offset+1] = uint8(v >> 8)
|
||||
dAtA[offset+2] = uint8(v >> 16)
|
||||
dAtA[offset+3] = uint8(v >> 24)
|
||||
dAtA[offset+4] = uint8(v >> 32)
|
||||
dAtA[offset+5] = uint8(v >> 40)
|
||||
dAtA[offset+6] = uint8(v >> 48)
|
||||
dAtA[offset+7] = uint8(v >> 56)
|
||||
return offset + 8
|
||||
}
|
||||
func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int {
|
||||
dAtA[offset] = uint8(v)
|
||||
dAtA[offset+1] = uint8(v >> 8)
|
||||
dAtA[offset+2] = uint8(v >> 16)
|
||||
dAtA[offset+3] = uint8(v >> 24)
|
||||
return offset + 4
|
||||
}
|
||||
func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
|
||||
for v >= 1<<7 {
|
||||
dAtA[offset] = uint8(v&0x7f | 0x80)
|
||||
21 vendor/k8s.io/api/batch/v1beta1/generated.pb.go (generated, vendored)
|
@ -14,9 +14,8 @@ See the License for the specific language governing permissions and
|
|||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Code generated by protoc-gen-gogo.
|
||||
// Code generated by protoc-gen-gogo. DO NOT EDIT.
|
||||
// source: k8s.io/kubernetes/vendor/k8s.io/api/batch/v1beta1/generated.proto
|
||||
// DO NOT EDIT!
|
||||
|
||||
/*
|
||||
Package v1beta1 is a generated protocol buffer package.
|
||||
|
@ -336,24 +335,6 @@ func (m *JobTemplateSpec) MarshalTo(dAtA []byte) (int, error) {
|
|||
return i, nil
|
||||
}
|
||||
|
||||
func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int {
|
||||
dAtA[offset] = uint8(v)
|
||||
dAtA[offset+1] = uint8(v >> 8)
|
||||
dAtA[offset+2] = uint8(v >> 16)
|
||||
dAtA[offset+3] = uint8(v >> 24)
|
||||
dAtA[offset+4] = uint8(v >> 32)
|
||||
dAtA[offset+5] = uint8(v >> 40)
|
||||
dAtA[offset+6] = uint8(v >> 48)
|
||||
dAtA[offset+7] = uint8(v >> 56)
|
||||
return offset + 8
|
||||
}
|
||||
func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int {
|
||||
dAtA[offset] = uint8(v)
|
||||
dAtA[offset+1] = uint8(v >> 8)
|
||||
dAtA[offset+2] = uint8(v >> 16)
|
||||
dAtA[offset+3] = uint8(v >> 24)
|
||||
return offset + 4
|
||||
}
|
||||
func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
|
||||
for v >= 1<<7 {
|
||||
dAtA[offset] = uint8(v&0x7f | 0x80)
|
||||
21 vendor/k8s.io/api/batch/v2alpha1/generated.pb.go (generated, vendored)
|
@ -14,9 +14,8 @@ See the License for the specific language governing permissions and
|
|||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Code generated by protoc-gen-gogo.
|
||||
// Code generated by protoc-gen-gogo. DO NOT EDIT.
|
||||
// source: k8s.io/kubernetes/vendor/k8s.io/api/batch/v2alpha1/generated.proto
|
||||
// DO NOT EDIT!
|
||||
|
||||
/*
|
||||
Package v2alpha1 is a generated protocol buffer package.
|
||||
|
@ -336,24 +335,6 @@ func (m *JobTemplateSpec) MarshalTo(dAtA []byte) (int, error) {
|
|||
return i, nil
|
||||
}
|
||||
|
||||
func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int {
|
||||
dAtA[offset] = uint8(v)
|
||||
dAtA[offset+1] = uint8(v >> 8)
|
||||
dAtA[offset+2] = uint8(v >> 16)
|
||||
dAtA[offset+3] = uint8(v >> 24)
|
||||
dAtA[offset+4] = uint8(v >> 32)
|
||||
dAtA[offset+5] = uint8(v >> 40)
|
||||
dAtA[offset+6] = uint8(v >> 48)
|
||||
dAtA[offset+7] = uint8(v >> 56)
|
||||
return offset + 8
|
||||
}
|
||||
func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int {
|
||||
dAtA[offset] = uint8(v)
|
||||
dAtA[offset+1] = uint8(v >> 8)
|
||||
dAtA[offset+2] = uint8(v >> 16)
|
||||
dAtA[offset+3] = uint8(v >> 24)
|
||||
return offset + 4
|
||||
}
|
||||
func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
|
||||
for v >= 1<<7 {
|
||||
dAtA[offset] = uint8(v&0x7f | 0x80)
|
||||
1 vendor/k8s.io/api/certificates/v1beta1/doc.go (generated, vendored)
@@ -18,4 +18,5 @@ limitations under the License.
// +k8s:openapi-gen=true

// +groupName=certificates.k8s.io

package v1beta1 // import "k8s.io/api/certificates/v1beta1"
167 vendor/k8s.io/api/certificates/v1beta1/generated.pb.go (generated, vendored)
|
@ -14,9 +14,8 @@ See the License for the specific language governing permissions and
|
|||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Code generated by protoc-gen-gogo.
|
||||
// Code generated by protoc-gen-gogo. DO NOT EDIT.
|
||||
// source: k8s.io/kubernetes/vendor/k8s.io/api/certificates/v1beta1/generated.proto
|
||||
// DO NOT EDIT!
|
||||
|
||||
/*
|
||||
Package v1beta1 is a generated protocol buffer package.
|
||||
|
@ -378,24 +377,6 @@ func (m ExtraValue) MarshalTo(dAtA []byte) (int, error) {
|
|||
return i, nil
|
||||
}
|
||||
|
||||
func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int {
|
||||
dAtA[offset] = uint8(v)
|
||||
dAtA[offset+1] = uint8(v >> 8)
|
||||
dAtA[offset+2] = uint8(v >> 16)
|
||||
dAtA[offset+3] = uint8(v >> 24)
|
||||
dAtA[offset+4] = uint8(v >> 32)
|
||||
dAtA[offset+5] = uint8(v >> 40)
|
||||
dAtA[offset+6] = uint8(v >> 48)
|
||||
dAtA[offset+7] = uint8(v >> 56)
|
||||
return offset + 8
|
||||
}
|
||||
func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int {
|
||||
dAtA[offset] = uint8(v)
|
||||
dAtA[offset+1] = uint8(v >> 8)
|
||||
dAtA[offset+2] = uint8(v >> 16)
|
||||
dAtA[offset+3] = uint8(v >> 24)
|
||||
return offset + 4
|
||||
}
|
||||
func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
|
||||
for v >= 1<<7 {
|
||||
dAtA[offset] = uint8(v&0x7f | 0x80)
|
||||
|
@ -1221,51 +1202,14 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error {
|
|||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
var keykey uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowGenerated
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
keykey |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
var stringLenmapkey uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowGenerated
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLenmapkey |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
intStringLenmapkey := int(stringLenmapkey)
|
||||
if intStringLenmapkey < 0 {
|
||||
return ErrInvalidLengthGenerated
|
||||
}
|
||||
postStringIndexmapkey := iNdEx + intStringLenmapkey
|
||||
if postStringIndexmapkey > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
mapkey := string(dAtA[iNdEx:postStringIndexmapkey])
|
||||
iNdEx = postStringIndexmapkey
|
||||
if m.Extra == nil {
|
||||
m.Extra = make(map[string]ExtraValue)
|
||||
}
|
||||
if iNdEx < postIndex {
|
||||
var valuekey uint64
|
||||
var mapkey string
|
||||
mapvalue := &ExtraValue{}
|
||||
for iNdEx < postIndex {
|
||||
entryPreIndex := iNdEx
|
||||
var wire uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowGenerated
|
||||
|
@ -1275,46 +1219,85 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error {
|
|||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
valuekey |= (uint64(b) & 0x7F) << shift
|
||||
wire |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
var mapmsglen int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowGenerated
|
||||
fieldNum := int32(wire >> 3)
|
||||
if fieldNum == 1 {
|
||||
var stringLenmapkey uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowGenerated
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLenmapkey |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if iNdEx >= l {
|
||||
intStringLenmapkey := int(stringLenmapkey)
|
||||
if intStringLenmapkey < 0 {
|
||||
return ErrInvalidLengthGenerated
|
||||
}
|
||||
postStringIndexmapkey := iNdEx + intStringLenmapkey
|
||||
if postStringIndexmapkey > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
mapmsglen |= (int(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
|
||||
iNdEx = postStringIndexmapkey
|
||||
} else if fieldNum == 2 {
|
||||
var mapmsglen int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowGenerated
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
mapmsglen |= (int(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if mapmsglen < 0 {
|
||||
return ErrInvalidLengthGenerated
|
||||
}
|
||||
postmsgIndex := iNdEx + mapmsglen
|
||||
if mapmsglen < 0 {
|
||||
return ErrInvalidLengthGenerated
|
||||
}
|
||||
if postmsgIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
mapvalue = &ExtraValue{}
|
||||
if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
|
||||
return err
|
||||
}
|
||||
iNdEx = postmsgIndex
|
||||
} else {
|
||||
iNdEx = entryPreIndex
|
||||
skippy, err := skipGenerated(dAtA[iNdEx:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if skippy < 0 {
|
||||
return ErrInvalidLengthGenerated
|
||||
}
|
||||
if (iNdEx + skippy) > postIndex {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
iNdEx += skippy
|
||||
}
|
||||
if mapmsglen < 0 {
|
||||
return ErrInvalidLengthGenerated
|
||||
}
|
||||
postmsgIndex := iNdEx + mapmsglen
|
||||
if mapmsglen < 0 {
|
||||
return ErrInvalidLengthGenerated
|
||||
}
|
||||
if postmsgIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
mapvalue := &ExtraValue{}
|
||||
if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
|
||||
return err
|
||||
}
|
||||
iNdEx = postmsgIndex
|
||||
m.Extra[mapkey] = *mapvalue
|
||||
} else {
|
||||
var mapvalue ExtraValue
|
||||
m.Extra[mapkey] = mapvalue
|
||||
}
|
||||
m.Extra[mapkey] = *mapvalue
|
||||
iNdEx = postIndex
|
||||
default:
|
||||
iNdEx = preIndex
|
||||
1 vendor/k8s.io/api/coordination/v1beta1/doc.go (generated, vendored)
@@ -18,4 +18,5 @@ limitations under the License.
// +k8s:openapi-gen=true

// +groupName=coordination.k8s.io

package v1beta1 // import "k8s.io/api/coordination/v1beta1"
21 vendor/k8s.io/api/coordination/v1beta1/generated.pb.go (generated, vendored)
|
@ -14,9 +14,8 @@ See the License for the specific language governing permissions and
|
|||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Code generated by protoc-gen-gogo.
|
||||
// Code generated by protoc-gen-gogo. DO NOT EDIT.
|
||||
// source: k8s.io/kubernetes/vendor/k8s.io/api/coordination/v1beta1/generated.proto
|
||||
// DO NOT EDIT!
|
||||
|
||||
/*
|
||||
Package v1beta1 is a generated protocol buffer package.
|
||||
|
@ -196,24 +195,6 @@ func (m *LeaseSpec) MarshalTo(dAtA []byte) (int, error) {
|
|||
return i, nil
|
||||
}
|
||||
|
||||
func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int {
|
||||
dAtA[offset] = uint8(v)
|
||||
dAtA[offset+1] = uint8(v >> 8)
|
||||
dAtA[offset+2] = uint8(v >> 16)
|
||||
dAtA[offset+3] = uint8(v >> 24)
|
||||
dAtA[offset+4] = uint8(v >> 32)
|
||||
dAtA[offset+5] = uint8(v >> 40)
|
||||
dAtA[offset+6] = uint8(v >> 48)
|
||||
dAtA[offset+7] = uint8(v >> 56)
|
||||
return offset + 8
|
||||
}
|
||||
func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int {
|
||||
dAtA[offset] = uint8(v)
|
||||
dAtA[offset+1] = uint8(v >> 8)
|
||||
dAtA[offset+2] = uint8(v >> 16)
|
||||
dAtA[offset+3] = uint8(v >> 24)
|
||||
return offset + 4
|
||||
}
|
||||
func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
|
||||
for v >= 1<<7 {
|
||||
dAtA[offset] = uint8(v&0x7f | 0x80)
|
||||
19 vendor/k8s.io/api/core/v1/annotation_key_constants.go (generated, vendored)
@@ -78,4 +78,23 @@ const (
//
// Not all cloud providers support this annotation, though AWS & GCE do.
AnnotationLoadBalancerSourceRangesKey = "service.beta.kubernetes.io/load-balancer-source-ranges"

// EndpointsLastChangeTriggerTime is the annotation key, set for endpoints objects, that
// represents the timestamp (stored as RFC 3339 date-time string, e.g. '2018-10-22T19:32:52.1Z')
// of the last change, of some Pod or Service object, that triggered the endpoints object change.
// In other words, if a Pod / Service changed at time T0, that change was observed by endpoints
// controller at T1, and the Endpoints object was changed at T2, the
// EndpointsLastChangeTriggerTime would be set to T0.
//
// The "endpoints change trigger" here means any Pod or Service change that resulted in the
// Endpoints object change.
//
// Given the definition of the "endpoints change trigger", please note that this annotation will
// be set ONLY for endpoints object changes triggered by either Pod or Service change. If the
// Endpoints object changes due to other reasons, this annotation won't be set (or updated if it's
// already set).
//
// This annotation will be used to compute the in-cluster network programming latency SLI, see
// https://github.com/kubernetes/community/blob/master/sig-scalability/slos/network_programming_latency.md
EndpointsLastChangeTriggerTime = "endpoints.kubernetes.io/last-change-trigger-time"
)
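The EndpointsLastChangeTriggerTime annotation documented above is the input for the network programming latency SLI. The sketch below shows one way a consumer might read it, assuming only the vendored k8s.io/api/core/v1 package; how the Endpoints object is obtained (informer, lister, or a direct GET) is outside this diff, and the sample timestamp is the one used in the comment above.

```go
package main

import (
	"fmt"
	"time"

	corev1 "k8s.io/api/core/v1"
)

// programmingLatency returns how long ago the Pod/Service change that produced
// this Endpoints update was observed, based on the annotation added above. It
// returns false when the annotation is absent, which the comments above say
// happens for Endpoints changes not triggered by a Pod or Service update.
func programmingLatency(ep *corev1.Endpoints, now time.Time) (time.Duration, bool) {
	raw, ok := ep.Annotations[corev1.EndpointsLastChangeTriggerTime]
	if !ok {
		return 0, false
	}
	trigger, err := time.Parse(time.RFC3339, raw) // stored as an RFC 3339 date-time string
	if err != nil {
		return 0, false
	}
	return now.Sub(trigger), true
}

func main() {
	ep := &corev1.Endpoints{}
	ep.Annotations = map[string]string{
		corev1.EndpointsLastChangeTriggerTime: "2018-10-22T19:32:52.1Z",
	}
	if d, ok := programmingLatency(ep, time.Now()); ok {
		fmt.Printf("endpoints change trigger was %s ago\n", d)
	}
}
```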
5730 vendor/k8s.io/api/core/v1/generated.pb.go (generated, vendored)
File diff suppressed because it is too large
72 vendor/k8s.io/api/core/v1/generated.proto (generated, vendored)
|
@ -31,7 +31,7 @@ import "k8s.io/apimachinery/pkg/util/intstr/generated.proto";
|
|||
option go_package = "v1";
|
||||
|
||||
// Represents a Persistent Disk resource in AWS.
|
||||
//
|
||||
//
|
||||
// An AWS EBS disk must exist before mounting to a container. The disk
|
||||
// must also be in the same AWS zone as the kubelet. An AWS EBS disk
|
||||
// can only be mounted as read/write once. AWS EBS volumes support
|
||||
|
@ -198,7 +198,7 @@ message CSIPersistentVolumeSource {
|
|||
// ControllerPublishSecretRef is a reference to the secret object containing
|
||||
// sensitive information to pass to the CSI driver to complete the CSI
|
||||
// ControllerPublishVolume and ControllerUnpublishVolume calls.
|
||||
// This field is optional, and may be empty if no secret is required. If the
|
||||
// This field is optional, and may be empty if no secret is required. If the
|
||||
// secret object contains more than one secret, all secrets are passed.
|
||||
// +optional
|
||||
optional SecretReference controllerPublishSecretRef = 6;
|
||||
|
@ -206,7 +206,7 @@ message CSIPersistentVolumeSource {
|
|||
// NodeStageSecretRef is a reference to the secret object containing sensitive
|
||||
// information to pass to the CSI driver to complete the CSI NodeStageVolume
|
||||
// and NodeStageVolume and NodeUnstageVolume calls.
|
||||
// This field is optional, and may be empty if no secret is required. If the
|
||||
// This field is optional, and may be empty if no secret is required. If the
|
||||
// secret object contains more than one secret, all secrets are passed.
|
||||
// +optional
|
||||
optional SecretReference nodeStageSecretRef = 7;
|
||||
|
@ -214,7 +214,7 @@ message CSIPersistentVolumeSource {
|
|||
// NodePublishSecretRef is a reference to the secret object containing
|
||||
// sensitive information to pass to the CSI driver to complete the CSI
|
||||
// NodePublishVolume and NodeUnpublishVolume calls.
|
||||
// This field is optional, and may be empty if no secret is required. If the
|
||||
// This field is optional, and may be empty if no secret is required. If the
|
||||
// secret object contains more than one secret, all secrets are passed.
|
||||
// +optional
|
||||
optional SecretReference nodePublishSecretRef = 8;
|
||||
|
@ -436,7 +436,7 @@ message ConfigMap {
|
|||
|
||||
// ConfigMapEnvSource selects a ConfigMap to populate the environment
|
||||
// variables with.
|
||||
//
|
||||
//
|
||||
// The contents of the target ConfigMap's Data field will represent the
|
||||
// key-value pairs as environment variables.
|
||||
message ConfigMapEnvSource {
|
||||
|
@ -497,7 +497,7 @@ message ConfigMapNodeConfigSource {
|
|||
}
|
||||
|
||||
// Adapts a ConfigMap into a projected volume.
|
||||
//
|
||||
//
|
||||
// The contents of the target ConfigMap's Data field will be presented in a
|
||||
// projected volume as files using the keys in the Data field as the file names,
|
||||
// unless the items element is populated with specific mappings of keys to paths.
|
||||
|
@ -522,7 +522,7 @@ message ConfigMapProjection {
|
|||
}
|
||||
|
||||
// Adapts a ConfigMap into a volume.
|
||||
//
|
||||
//
|
||||
// The contents of the target ConfigMap's Data field will be presented in a
|
||||
// volume as files using the keys in the Data field as the file names, unless
|
||||
// the items element is populated with specific mappings of keys to paths.
|
||||
|
@ -606,6 +606,9 @@ message Container {
|
|||
// +optional
|
||||
// +patchMergeKey=containerPort
|
||||
// +patchStrategy=merge
|
||||
// +listType=map
|
||||
// +listMapKey=containerPort
|
||||
// +listMapKey=protocol
|
||||
repeated ContainerPort ports = 6;
|
||||
|
||||
// List of sources to populate environment variables in the container.
|
||||
|
@ -638,7 +641,7 @@ message Container {
|
|||
repeated VolumeMount volumeMounts = 9;
|
||||
|
||||
// volumeDevices is the list of block devices to be used by the container.
|
||||
// This is an alpha feature and may change in the future.
|
||||
// This is a beta feature.
|
||||
// +patchMergeKey=devicePath
|
||||
// +patchStrategy=merge
|
||||
// +optional
|
||||
|
@ -1314,7 +1317,7 @@ message FlockerVolumeSource {
|
|||
}
|
||||
|
||||
// Represents a Persistent Disk resource in Google Compute Engine.
|
||||
//
|
||||
//
|
||||
// A GCE PD must exist before mounting to a container. The disk must
|
||||
// also be in the same GCE project and zone as the kubelet. A GCE PD
|
||||
// can only be mounted as read/write once or read-only many times. GCE
|
||||
|
@ -1350,7 +1353,7 @@ message GCEPersistentDiskVolumeSource {
|
|||
// Represents a volume that is populated with the contents of a git repository.
|
||||
// Git repo volumes do not support ownership management.
|
||||
// Git repo volumes support SELinux relabeling.
|
||||
//
|
||||
//
|
||||
// DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an
|
||||
// EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir
|
||||
// into the Pod's container.
|
||||
|
@@ -1370,6 +1373,30 @@ message GitRepoVolumeSource {
optional string directory = 3;
}

// Represents a Glusterfs mount that lasts the lifetime of a pod.
// Glusterfs volumes do not support ownership management or SELinux relabeling.
message GlusterfsPersistentVolumeSource {
// EndpointsName is the endpoint name that details Glusterfs topology.
// More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod
optional string endpoints = 1;

// Path is the Glusterfs volume path.
// More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod
optional string path = 2;

// ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions.
// Defaults to false.
// More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod
// +optional
optional bool readOnly = 3;

// EndpointsNamespace is the namespace that contains Glusterfs endpoint.
// If this field is empty, the EndpointNamespace defaults to the same namespace as the bound PVC.
// More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod
// +optional
optional string endpointsNamespace = 4;
}

// Represents a Glusterfs mount that lasts the lifetime of a pod.
// Glusterfs volumes do not support ownership management or SELinux relabeling.
message GlusterfsVolumeSource {
@@ -2293,7 +2320,7 @@ message PersistentVolumeClaimSpec {

// volumeMode defines what type of volume is required by the claim.
// Value of Filesystem is implied when not included in claim spec.
// This is an alpha feature and may change in the future.
// This is a beta feature.
// +optional
optional string volumeMode = 6;

@@ -2386,7 +2413,7 @@ message PersistentVolumeSource {
// exposed to the pod. Provisioned by an admin.
// More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md
// +optional
optional GlusterfsVolumeSource glusterfs = 4;
optional GlusterfsPersistentVolumeSource glusterfs = 4;

// NFS represents an NFS mount on the host. Provisioned by an admin.
// More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
@ -2509,7 +2536,7 @@ message PersistentVolumeSpec {
|
|||
|
||||
// volumeMode defines if a volume is intended to be used with a formatted filesystem
|
||||
// or to remain in raw block state. Value of Filesystem is implied when not included in spec.
|
||||
// This is an alpha feature and may change in the future.
|
||||
// This is a beta feature.
|
||||
// +optional
|
||||
optional string volumeMode = 8;
|
||||
|
||||
|
@ -2899,11 +2926,11 @@ message PodSecurityContext {
|
|||
// A special supplemental group that applies to all containers in a pod.
|
||||
// Some volume types allow the Kubelet to change the ownership of that volume
|
||||
// to be owned by the pod:
|
||||
//
|
||||
//
|
||||
// 1. The owning GID will be the FSGroup
|
||||
// 2. The setgid bit is set (new files created in the volume will be owned by FSGroup)
|
||||
// 3. The permission bits are OR'd with rw-rw----
|
||||
//
|
||||
//
|
||||
// If unset, the Kubelet will not modify the ownership and permissions of any volume.
|
||||
// +optional
|
||||
optional int64 fsGroup = 5;
|
||||
|
@ -3126,6 +3153,11 @@ message PodSpec {
|
|||
// This is an alpha feature and may change in the future.
|
||||
// +optional
|
||||
optional string runtimeClassName = 29;
|
||||
|
||||
// EnableServiceLinks indicates whether information about services should be injected into pod's
|
||||
// environment variables, matching the syntax of Docker links.
|
||||
// +optional
|
||||
optional bool enableServiceLinks = 30;
|
||||
}
|
||||
|
||||
// PodStatus represents information about the status of a pod. Status may trail the actual
|
||||
|
@ -3136,7 +3168,7 @@ message PodStatus {
|
|||
// The conditions array, the reason and message fields, and the individual container status
|
||||
// arrays contain more detail about the pod's status.
|
||||
// There are five possible phase values:
|
||||
//
|
||||
//
|
||||
// Pending: The pod has been accepted by the Kubernetes system, but one or more of the
|
||||
// container images has not been created. This includes time before being scheduled as
|
||||
// well as time spent downloading images over the network, which could take a while.
|
||||
|
@ -3148,7 +3180,7 @@ message PodStatus {
|
|||
// by the system.
|
||||
// Unknown: For some reason the state of the pod could not be obtained, typically due to an
|
||||
// error in communicating with the host of the pod.
|
||||
//
|
||||
//
|
||||
// More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-phase
|
||||
// +optional
|
||||
optional string phase = 1;
|
||||
|
@ -3879,7 +3911,7 @@ message Secret {
|
|||
|
||||
// SecretEnvSource selects a Secret to populate the environment
|
||||
// variables with.
|
||||
//
|
||||
//
|
||||
// The contents of the target Secret's Data field will represent the
|
||||
// key-value pairs as environment variables.
|
||||
message SecretEnvSource {
|
||||
|
@ -3917,7 +3949,7 @@ message SecretList {
|
|||
}
|
||||
|
||||
// Adapts a secret into a projected volume.
|
||||
//
|
||||
//
|
||||
// The contents of the target Secret's Data field will be presented in a
|
||||
// projected volume as files using the keys in the Data field as the file names.
|
||||
// Note that this is identical to a secret volume source without the default
|
||||
|
@ -3953,7 +3985,7 @@ message SecretReference {
|
|||
}
|
||||
|
||||
// Adapts a Secret into a volume.
|
||||
//
|
||||
//
|
||||
// The contents of the target Secret's Data field will be presented in a volume
|
||||
// as files using the keys in the Data field as the file names.
|
||||
// Secret volumes support ownership management and SELinux relabeling.
|
||||
|
|
58 changes: vendor/k8s.io/api/core/v1/types.go (generated, vendored)
|
@ -191,7 +191,7 @@ type PersistentVolumeSource struct {
|
|||
// exposed to the pod. Provisioned by an admin.
|
||||
// More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md
|
||||
// +optional
|
||||
Glusterfs *GlusterfsVolumeSource `json:"glusterfs,omitempty" protobuf:"bytes,4,opt,name=glusterfs"`
|
||||
Glusterfs *GlusterfsPersistentVolumeSource `json:"glusterfs,omitempty" protobuf:"bytes,4,opt,name=glusterfs"`
|
||||
// NFS represents an NFS mount on the host. Provisioned by an admin.
|
||||
// More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs
|
||||
// +optional
|
||||
|
@ -326,7 +326,7 @@ type PersistentVolumeSpec struct {
|
|||
MountOptions []string `json:"mountOptions,omitempty" protobuf:"bytes,7,opt,name=mountOptions"`
|
||||
// volumeMode defines if a volume is intended to be used with a formatted filesystem
|
||||
// or to remain in raw block state. Value of Filesystem is implied when not included in spec.
|
||||
// This is an alpha feature and may change in the future.
|
||||
// This is a beta feature.
|
||||
// +optional
|
||||
VolumeMode *PersistentVolumeMode `json:"volumeMode,omitempty" protobuf:"bytes,8,opt,name=volumeMode,casttype=PersistentVolumeMode"`
|
||||
// NodeAffinity defines constraints that limit what nodes this volume can be accessed from.
|
||||
|
@ -455,7 +455,7 @@ type PersistentVolumeClaimSpec struct {
|
|||
StorageClassName *string `json:"storageClassName,omitempty" protobuf:"bytes,5,opt,name=storageClassName"`
|
||||
// volumeMode defines what type of volume is required by the claim.
|
||||
// Value of Filesystem is implied when not included in claim spec.
|
||||
// This is an alpha feature and may change in the future.
|
||||
// This is a beta feature.
|
||||
// +optional
|
||||
VolumeMode *PersistentVolumeMode `json:"volumeMode,omitempty" protobuf:"bytes,6,opt,name=volumeMode,casttype=PersistentVolumeMode"`
|
||||
// This field requires the VolumeSnapshotDataSource alpha feature gate to be
|
||||
|
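With the volumeMode comment above now describing a beta feature, a short hedged sketch of a claim requesting a raw block device follows; the claim name, access mode, and size are illustrative assumptions, not values from this commit.

package example

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// exampleBlockPVC builds a claim whose volumeMode requests a raw block device.
func exampleBlockPVC() *v1.PersistentVolumeClaim {
	mode := v1.PersistentVolumeBlock // the non-default PersistentVolumeMode
	return &v1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{Name: "example-block-claim"},
		Spec: v1.PersistentVolumeClaimSpec{
			AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
			VolumeMode:  &mode,
			Resources: v1.ResourceRequirements{
				Requests: v1.ResourceList{v1.ResourceStorage: resource.MustParse("10Gi")},
			},
		},
	}
}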
@ -636,6 +636,30 @@ type GlusterfsVolumeSource struct {
|
|||
ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"`
|
||||
}
|
||||
|
||||
// Represents a Glusterfs mount that lasts the lifetime of a pod.
// Glusterfs volumes do not support ownership management or SELinux relabeling.
type GlusterfsPersistentVolumeSource struct {
	// EndpointsName is the endpoint name that details Glusterfs topology.
	// More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod
	EndpointsName string `json:"endpoints" protobuf:"bytes,1,opt,name=endpoints"`

	// Path is the Glusterfs volume path.
	// More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod
	Path string `json:"path" protobuf:"bytes,2,opt,name=path"`

	// ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions.
	// Defaults to false.
	// More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod
	// +optional
	ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"`

	// EndpointsNamespace is the namespace that contains Glusterfs endpoint.
	// If this field is empty, the EndpointNamespace defaults to the same namespace as the bound PVC.
	// More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod
	// +optional
	EndpointsNamespace *string `json:"endpointsNamespace,omitempty" protobuf:"bytes,4,opt,name=endpointsNamespace"`
}
|
||||
|
||||
// Represents a Rados Block Device mount that lasts the lifetime of a pod.
|
||||
// RBD volumes support ownership management and SELinux relabeling.
|
||||
type RBDVolumeSource struct {
|
||||
|
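The GlusterfsPersistentVolumeSource struct shown above adds an optional EndpointsNamespace pointer. A minimal sketch of populating it when building a PersistentVolume against this vendored API follows, assuming k8s.io/api/core/v1 from this vendor tree; the object name, endpoints, path, and namespace are illustrative, and capacity and access-mode fields are omitted for brevity.

package example

import (
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// exampleGlusterfsPV shows the new EndpointsNamespace pointer in use.
func exampleGlusterfsPV() *v1.PersistentVolume {
	ns := "storage" // hypothetical namespace holding the endpoints object
	return &v1.PersistentVolume{
		ObjectMeta: metav1.ObjectMeta{Name: "example-glusterfs-pv"},
		Spec: v1.PersistentVolumeSpec{
			PersistentVolumeSource: v1.PersistentVolumeSource{
				Glusterfs: &v1.GlusterfsPersistentVolumeSource{
					EndpointsName:      "glusterfs-cluster",
					Path:               "example_vol",
					ReadOnly:           false,
					EndpointsNamespace: &ns, // nil falls back to the PVC's own namespace
				},
			},
		},
	}
}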
@ -1640,7 +1664,7 @@ type CSIPersistentVolumeSource struct {
|
|||
// ControllerPublishSecretRef is a reference to the secret object containing
|
||||
// sensitive information to pass to the CSI driver to complete the CSI
|
||||
// ControllerPublishVolume and ControllerUnpublishVolume calls.
|
||||
// This field is optional, and may be empty if no secret is required. If the
|
||||
// This field is optional, and may be empty if no secret is required. If the
|
||||
// secret object contains more than one secret, all secrets are passed.
|
||||
// +optional
|
||||
ControllerPublishSecretRef *SecretReference `json:"controllerPublishSecretRef,omitempty" protobuf:"bytes,6,opt,name=controllerPublishSecretRef"`
|
||||
|
@ -1648,7 +1672,7 @@ type CSIPersistentVolumeSource struct {
|
|||
// NodeStageSecretRef is a reference to the secret object containing sensitive
|
||||
// information to pass to the CSI driver to complete the CSI NodeStageVolume
|
||||
// and NodeStageVolume and NodeUnstageVolume calls.
|
||||
// This field is optional, and may be empty if no secret is required. If the
|
||||
// This field is optional, and may be empty if no secret is required. If the
|
||||
// secret object contains more than one secret, all secrets are passed.
|
||||
// +optional
|
||||
NodeStageSecretRef *SecretReference `json:"nodeStageSecretRef,omitempty" protobuf:"bytes,7,opt,name=nodeStageSecretRef"`
|
||||
|
@ -1656,7 +1680,7 @@ type CSIPersistentVolumeSource struct {
|
|||
// NodePublishSecretRef is a reference to the secret object containing
|
||||
// sensitive information to pass to the CSI driver to complete the CSI
|
||||
// NodePublishVolume and NodeUnpublishVolume calls.
|
||||
// This field is optional, and may be empty if no secret is required. If the
|
||||
// This field is optional, and may be empty if no secret is required. If the
|
||||
// secret object contains more than one secret, all secrets are passed.
|
||||
// +optional
|
||||
NodePublishSecretRef *SecretReference `json:"nodePublishSecretRef,omitempty" protobuf:"bytes,8,opt,name=nodePublishSecretRef"`
|
||||
|
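The reworded comments above apply to three optional *SecretReference fields on CSIPersistentVolumeSource. A hedged sketch of setting them together follows; the driver name, volume handle, and secret names are assumptions for illustration only.

package example

import v1 "k8s.io/api/core/v1"

// exampleCSISource wires the three optional secret references described above.
func exampleCSISource() *v1.CSIPersistentVolumeSource {
	nodeSecret := &v1.SecretReference{Name: "csi-node-creds", Namespace: "kube-system"}
	return &v1.CSIPersistentVolumeSource{
		Driver:       "csi.example.com", // hypothetical driver name
		VolumeHandle: "vol-0123456789",  // hypothetical handle
		// Each reference is optional; a nil field means the corresponding
		// CSI call is made without a secret.
		ControllerPublishSecretRef: &v1.SecretReference{Name: "csi-ctrl-creds", Namespace: "kube-system"},
		NodeStageSecretRef:         nodeSecret,
		NodePublishSecretRef:       nodeSecret,
	}
}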
@ -2060,6 +2084,9 @@ type Container struct {
|
|||
// +optional
|
||||
// +patchMergeKey=containerPort
|
||||
// +patchStrategy=merge
|
||||
// +listType=map
|
||||
// +listMapKey=containerPort
|
||||
// +listMapKey=protocol
|
||||
Ports []ContainerPort `json:"ports,omitempty" patchStrategy:"merge" patchMergeKey:"containerPort" protobuf:"bytes,6,rep,name=ports"`
|
||||
// List of sources to populate environment variables in the container.
|
||||
// The keys defined within a source must be a C_IDENTIFIER. All invalid keys
|
||||
|
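The new +listType=map and +listMapKey markers on Ports declare that the list is merged as a map keyed by (containerPort, protocol). A brief sketch of a container whose port list relies on that key follows; the names, numbers, and image are illustrative.

package example

import v1 "k8s.io/api/core/v1"

// Two entries may share a containerPort as long as the protocol differs,
// because the list is merged as a map keyed by (containerPort, protocol).
var exampleContainer = v1.Container{
	Name:  "app",
	Image: "example.invalid/app:latest",
	Ports: []v1.ContainerPort{
		{Name: "dns-tcp", ContainerPort: 53, Protocol: v1.ProtocolTCP},
		{Name: "dns-udp", ContainerPort: 53, Protocol: v1.ProtocolUDP},
	},
}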
@ -2087,7 +2114,7 @@ type Container struct {
|
|||
// +patchStrategy=merge
|
||||
VolumeMounts []VolumeMount `json:"volumeMounts,omitempty" patchStrategy:"merge" patchMergeKey:"mountPath" protobuf:"bytes,9,rep,name=volumeMounts"`
|
||||
// volumeDevices is the list of block devices to be used by the container.
|
||||
// This is an alpha feature and may change in the future.
|
||||
// This is a beta feature.
|
||||
// +patchMergeKey=devicePath
|
||||
// +patchStrategy=merge
|
||||
// +optional
|
||||
|
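volumeDevices is now documented as a beta feature rather than alpha. A small sketch of a container consuming a raw block device follows; the device name, path, and image are illustrative assumptions, and the referenced volume would need a Block-mode claim as in the earlier hunk.

package example

import v1 "k8s.io/api/core/v1"

// blockConsumer attaches a raw block device; the "block-data" volume in the
// pod spec would have to be backed by a claim with volumeMode: Block.
var blockConsumer = v1.Container{
	Name:  "raw-block-user",
	Image: "example.invalid/engine:latest",
	VolumeDevices: []v1.VolumeDevice{
		{Name: "block-data", DevicePath: "/dev/xvda"},
	},
}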
@ -2891,8 +2918,17 @@ type PodSpec struct {
|
|||
// This is an alpha feature and may change in the future.
|
||||
// +optional
|
||||
RuntimeClassName *string `json:"runtimeClassName,omitempty" protobuf:"bytes,29,opt,name=runtimeClassName"`
|
||||
	// EnableServiceLinks indicates whether information about services should be injected into pod's
	// environment variables, matching the syntax of Docker links.
	// +optional
	EnableServiceLinks *bool `json:"enableServiceLinks,omitempty" protobuf:"varint,30,opt,name=enableServiceLinks"`
}

const (
	// The default value for enableServiceLinks attribute.
	DefaultEnableServiceLinks = true
)
|
||||
|
||||
// HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the
|
||||
// pod's hosts file.
|
||||
type HostAlias struct {
|
||||
|
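Because EnableServiceLinks is a *bool, callers typically materialise the new DefaultEnableServiceLinks constant into a local variable before taking its address. A hedged sketch follows; the pod and container values are illustrative.

package example

import (
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// examplePodWithServiceLinks opts in to the documented default explicitly.
func examplePodWithServiceLinks() *v1.Pod {
	enable := v1.DefaultEnableServiceLinks // materialise the constant so its address can be taken
	return &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "example-pod"},
		Spec: v1.PodSpec{
			EnableServiceLinks: &enable,
			Containers: []v1.Container{
				{Name: "app", Image: "example.invalid/app:latest"},
			},
		},
	}
}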
@ -3273,8 +3309,8 @@ type ReplicationControllerCondition struct {
|
|||
}
|
||||
|
||||
// +genclient
|
||||
// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/api/extensions/v1beta1.Scale
|
||||
// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/api/extensions/v1beta1.Scale,result=k8s.io/api/extensions/v1beta1.Scale
|
||||
// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/api/autoscaling/v1.Scale
|
||||
// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/api/autoscaling/v1.Scale,result=k8s.io/api/autoscaling/v1.Scale
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
||||
// ReplicationController represents the configuration of a replication controller.
|
||||
|
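The genclient markers above switch the ReplicationController scale subresource from extensions/v1beta1.Scale to autoscaling/v1.Scale. Assuming the typed client generated from these markers exposes GetScale and UpdateScale in the context-free form used at the time, a resize might look roughly like the sketch below; the clientset wiring and import aliases are assumptions, not code from this commit.

package example

import (
	autoscalingv1 "k8s.io/api/autoscaling/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// scaleRC resizes a ReplicationController through the scale subresource,
// which after this change is expressed as autoscaling/v1.Scale.
func scaleRC(cs kubernetes.Interface, namespace, name string, replicas int32) (*autoscalingv1.Scale, error) {
	scale, err := cs.CoreV1().ReplicationControllers(namespace).GetScale(name, metav1.GetOptions{})
	if err != nil {
		return nil, err
	}
	scale.Spec.Replicas = replicas
	return cs.CoreV1().ReplicationControllers(namespace).UpdateScale(name, scale)
}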
@ -4987,6 +5023,10 @@ const (
|
|||
TLSCertKey = "tls.crt"
|
||||
// TLSPrivateKeyKey is the key for the private key field in a TLS secret.
|
||||
TLSPrivateKeyKey = "tls.key"
|
||||
// SecretTypeBootstrapToken is used during the automated bootstrap process (first
|
||||
// implemented by kubeadm). It stores tokens that are used to sign well known
|
||||
// ConfigMaps. They are used for authn.
|
||||
SecretTypeBootstrapToken SecretType = "bootstrap.kubernetes.io/token"
|
||||
)
|
||||
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
|
63 changes: vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go (generated, vendored)
|
@ -123,9 +123,9 @@ var map_CSIPersistentVolumeSource = map[string]string{
|
|||
"readOnly": "Optional: The value to pass to ControllerPublishVolumeRequest. Defaults to false (read/write).",
|
||||
"fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\".",
|
||||
"volumeAttributes": "Attributes of the volume to publish.",
|
||||
"controllerPublishSecretRef": "ControllerPublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI ControllerPublishVolume and ControllerUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.",
|
||||
"nodeStageSecretRef": "NodeStageSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodeStageVolume and NodeStageVolume and NodeUnstageVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.",
|
||||
"nodePublishSecretRef": "NodePublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodePublishVolume and NodeUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.",
|
||||
"controllerPublishSecretRef": "ControllerPublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI ControllerPublishVolume and ControllerUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.",
|
||||
"nodeStageSecretRef": "NodeStageSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodeStageVolume and NodeStageVolume and NodeUnstageVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.",
|
||||
"nodePublishSecretRef": "NodePublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodePublishVolume and NodeUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.",
|
||||
}
|
||||
|
||||
func (CSIPersistentVolumeSource) SwaggerDoc() map[string]string {
|
||||
|
@ -321,7 +321,7 @@ var map_Container = map[string]string{
|
|||
"env": "List of environment variables to set in the container. Cannot be updated.",
|
||||
"resources": "Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/",
|
||||
"volumeMounts": "Pod volumes to mount into the container's filesystem. Cannot be updated.",
|
||||
"volumeDevices": "volumeDevices is the list of block devices to be used by the container. This is an alpha feature and may change in the future.",
|
||||
"volumeDevices": "volumeDevices is the list of block devices to be used by the container. This is a beta feature.",
|
||||
"livenessProbe": "Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes",
|
||||
"readinessProbe": "Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes",
|
||||
"lifecycle": "Actions that the management system should take in response to container lifecycle events. Cannot be updated.",
|
||||
|
@ -695,6 +695,18 @@ func (GitRepoVolumeSource) SwaggerDoc() map[string]string {
|
|||
return map_GitRepoVolumeSource
|
||||
}
|
||||
|
||||
var map_GlusterfsPersistentVolumeSource = map[string]string{
|
||||
"": "Represents a Glusterfs mount that lasts the lifetime of a pod. Glusterfs volumes do not support ownership management or SELinux relabeling.",
|
||||
"endpoints": "EndpointsName is the endpoint name that details Glusterfs topology. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod",
|
||||
"path": "Path is the Glusterfs volume path. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod",
|
||||
"readOnly": "ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod",
|
||||
"endpointsNamespace": "EndpointsNamespace is the namespace that contains Glusterfs endpoint. If this field is empty, the EndpointNamespace defaults to the same namespace as the bound PVC. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod",
|
||||
}
|
||||
|
||||
func (GlusterfsPersistentVolumeSource) SwaggerDoc() map[string]string {
|
||||
return map_GlusterfsPersistentVolumeSource
|
||||
}
|
||||
|
||||
var map_GlusterfsVolumeSource = map[string]string{
|
||||
"": "Represents a Glusterfs mount that lasts the lifetime of a pod. Glusterfs volumes do not support ownership management or SELinux relabeling.",
|
||||
"endpoints": "EndpointsName is the endpoint name that details Glusterfs topology. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod",
|
||||
|
@ -1210,7 +1222,7 @@ var map_PersistentVolumeClaimSpec = map[string]string{
|
|||
"resources": "Resources represents the minimum resources the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources",
|
||||
"volumeName": "VolumeName is the binding reference to the PersistentVolume backing this claim.",
|
||||
"storageClassName": "Name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1",
|
||||
"volumeMode": "volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec. This is an alpha feature and may change in the future.",
|
||||
"volumeMode": "volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec. This is a beta feature.",
|
||||
"dataSource": "This field requires the VolumeSnapshotDataSource alpha feature gate to be enabled and currently VolumeSnapshot is the only supported data source. If the provisioner can support VolumeSnapshot data source, it will create a new volume and data will be restored to the volume at the same time. If the provisioner does not support VolumeSnapshot data source, volume will not be created and the failure will be reported as an event. In the future, we plan to support more data source types and the behavior of the provisioner may change.",
|
||||
}
|
||||
|
||||
|
@ -1288,7 +1300,7 @@ var map_PersistentVolumeSpec = map[string]string{
|
|||
"persistentVolumeReclaimPolicy": "What happens to a persistent volume when released from its claim. Valid options are Retain (default for manually created PersistentVolumes), Delete (default for dynamically provisioned PersistentVolumes), and Recycle (deprecated). Recycle must be supported by the volume plugin underlying this PersistentVolume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#reclaiming",
|
||||
"storageClassName": "Name of StorageClass to which this persistent volume belongs. Empty value means that this volume does not belong to any StorageClass.",
|
||||
"mountOptions": "A list of mount options, e.g. [\"ro\", \"soft\"]. Not validated - mount will simply fail if one is invalid. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options",
|
||||
"volumeMode": "volumeMode defines if a volume is intended to be used with a formatted filesystem or to remain in raw block state. Value of Filesystem is implied when not included in spec. This is an alpha feature and may change in the future.",
|
||||
"volumeMode": "volumeMode defines if a volume is intended to be used with a formatted filesystem or to remain in raw block state. Value of Filesystem is implied when not included in spec. This is a beta feature.",
|
||||
"nodeAffinity": "NodeAffinity defines constraints that limit what nodes this volume can be accessed from. This field influences the scheduling of pods that use this volume.",
|
||||
}
|
||||
|
||||
|
@ -1528,6 +1540,7 @@ var map_PodSpec = map[string]string{
|
|||
"dnsConfig": "Specifies the DNS parameters of a pod. Parameters specified here will be merged to the generated DNS configuration based on DNSPolicy.",
|
||||
"readinessGates": "If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \"True\" More info: https://github.com/kubernetes/community/blob/master/keps/sig-network/0007-pod-ready%2B%2B.md",
|
||||
"runtimeClassName": "RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://github.com/kubernetes/community/blob/master/keps/sig-node/0014-runtime-class.md This is an alpha feature and may change in the future.",
|
||||
"enableServiceLinks": "EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links.",
|
||||
}
|
||||
|
||||
func (PodSpec) SwaggerDoc() map[string]string {
|
||||
|
@ -1636,7 +1649,7 @@ func (PreferredSchedulingTerm) SwaggerDoc() map[string]string {
|
|||
}
|
||||
|
||||
var map_Probe = map[string]string{
|
||||
"": "Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic.",
|
||||
"": "Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic.",
|
||||
"initialDelaySeconds": "Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes",
|
||||
"timeoutSeconds": "Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes",
|
||||
"periodSeconds": "How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1.",
|
||||
|
@ -2201,7 +2214,7 @@ func (TopologySelectorLabelRequirement) SwaggerDoc() map[string]string {
|
|||
}
|
||||
|
||||
var map_TopologySelectorTerm = map[string]string{
|
||||
"": "A topology selector term represents the result of label queries. A null or empty topology selector term matches no objects. The requirements of them are ANDed. It provides a subset of functionality as NodeSelectorTerm. This is an alpha feature and may change in the future.",
|
||||
"": "A topology selector term represents the result of label queries. A null or empty topology selector term matches no objects. The requirements of them are ANDed. It provides a subset of functionality as NodeSelectorTerm. This is an alpha feature and may change in the future.",
|
||||
"matchLabelExpressions": "A list of topology selector requirements by labels.",
|
||||
}
|
||||
|
||||
|
@ -2285,23 +2298,23 @@ var map_VolumeSource = map[string]string{
|
|||
"iscsi": "ISCSI represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://releases.k8s.io/HEAD/examples/volumes/iscsi/README.md",
|
||||
"glusterfs": "Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md",
|
||||
"persistentVolumeClaim": "PersistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims",
|
||||
"rbd": "RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md",
|
||||
"flexVolume": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.",
|
||||
"cinder": "Cinder represents a cinder volume attached and mounted on kubelets host machine More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md",
|
||||
"cephfs": "CephFS represents a Ceph FS mount on the host that shares a pod's lifetime",
|
||||
"flocker": "Flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running",
|
||||
"downwardAPI": "DownwardAPI represents downward API about the pod that should populate this volume",
|
||||
"fc": "FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.",
|
||||
"azureFile": "AzureFile represents an Azure File Service mount on the host and bind mount to the pod.",
|
||||
"configMap": "ConfigMap represents a configMap that should populate this volume",
|
||||
"vsphereVolume": "VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine",
|
||||
"quobyte": "Quobyte represents a Quobyte mount on the host that shares a pod's lifetime",
|
||||
"azureDisk": "AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.",
|
||||
"photonPersistentDisk": "PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine",
|
||||
"projected": "Items for all in one resources secrets, configmaps, and downward API",
|
||||
"portworxVolume": "PortworxVolume represents a portworx volume attached and mounted on kubelets host machine",
|
||||
"scaleIO": "ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.",
|
||||
"storageos": "StorageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.",
|
||||
"rbd": "RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md",
|
||||
"flexVolume": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.",
|
||||
"cinder": "Cinder represents a cinder volume attached and mounted on kubelets host machine More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md",
|
||||
"cephfs": "CephFS represents a Ceph FS mount on the host that shares a pod's lifetime",
|
||||
"flocker": "Flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running",
|
||||
"downwardAPI": "DownwardAPI represents downward API about the pod that should populate this volume",
|
||||
"fc": "FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.",
|
||||
"azureFile": "AzureFile represents an Azure File Service mount on the host and bind mount to the pod.",
|
||||
"configMap": "ConfigMap represents a configMap that should populate this volume",
|
||||
"vsphereVolume": "VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine",
|
||||
"quobyte": "Quobyte represents a Quobyte mount on the host that shares a pod's lifetime",
|
||||
"azureDisk": "AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.",
|
||||
"photonPersistentDisk": "PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine",
|
||||
"projected": "Items for all in one resources secrets, configmaps, and downward API",
|
||||
"portworxVolume": "PortworxVolume represents a portworx volume attached and mounted on kubelets host machine",
|
||||
"scaleIO": "ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.",
|
||||
"storageos": "StorageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.",
|
||||
}
|
||||
|
||||
func (VolumeSource) SwaggerDoc() map[string]string {
|
||||
|
|
30 changes: vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go (generated, vendored)
|
@ -1498,6 +1498,27 @@ func (in *GitRepoVolumeSource) DeepCopy() *GitRepoVolumeSource {
|
|||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *GlusterfsPersistentVolumeSource) DeepCopyInto(out *GlusterfsPersistentVolumeSource) {
|
||||
*out = *in
|
||||
if in.EndpointsNamespace != nil {
|
||||
in, out := &in.EndpointsNamespace, &out.EndpointsNamespace
|
||||
*out = new(string)
|
||||
**out = **in
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GlusterfsPersistentVolumeSource.
|
||||
func (in *GlusterfsPersistentVolumeSource) DeepCopy() *GlusterfsPersistentVolumeSource {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(GlusterfsPersistentVolumeSource)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *GlusterfsVolumeSource) DeepCopyInto(out *GlusterfsVolumeSource) {
|
||||
*out = *in
|
||||
|
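The generated deep-copy functions for GlusterfsPersistentVolumeSource above clone the EndpointsNamespace pointer rather than sharing it. A tiny sketch of what that guarantees to callers follows; the values are illustrative.

package example

import v1 "k8s.io/api/core/v1"

// exampleDeepCopy demonstrates that the copy owns its own EndpointsNamespace.
func exampleDeepCopy() {
	ns := "storage"
	in := &v1.GlusterfsPersistentVolumeSource{
		EndpointsName:      "glusterfs-cluster",
		Path:               "example_vol",
		EndpointsNamespace: &ns,
	}
	out := in.DeepCopy()
	*out.EndpointsNamespace = "other" // does not affect *in.EndpointsNamespace
	_ = in                            // still references "storage"
}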
@ -2806,8 +2827,8 @@ func (in *PersistentVolumeSource) DeepCopyInto(out *PersistentVolumeSource) {
|
|||
}
|
||||
if in.Glusterfs != nil {
|
||||
in, out := &in.Glusterfs, &out.Glusterfs
|
||||
*out = new(GlusterfsVolumeSource)
|
||||
**out = **in
|
||||
*out = new(GlusterfsPersistentVolumeSource)
|
||||
(*in).DeepCopyInto(*out)
|
||||
}
|
||||
if in.NFS != nil {
|
||||
in, out := &in.NFS, &out.NFS
|
||||
|
@ -3554,6 +3575,11 @@ func (in *PodSpec) DeepCopyInto(out *PodSpec) {
|
|||
*out = new(string)
|
||||
**out = **in
|
||||
}
|
||||
if in.EnableServiceLinks != nil {
|
||||
in, out := &in.EnableServiceLinks, &out.EnableServiceLinks
|
||||
*out = new(bool)
|
||||
**out = **in
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
|
|
1 change: vendor/k8s.io/api/events/v1beta1/doc.go (generated, vendored)
|
@ -18,4 +18,5 @@ limitations under the License.
|
|||
// +k8s:openapi-gen=true
|
||||
|
||||
// +groupName=events.k8s.io
|
||||
|
||||
package v1beta1 // import "k8s.io/api/events/v1beta1"
|
||||
|
|
21 changes: vendor/k8s.io/api/events/v1beta1/generated.pb.go (generated, vendored)
|
@ -14,9 +14,8 @@ See the License for the specific language governing permissions and
|
|||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Code generated by protoc-gen-gogo.
|
||||
// Code generated by protoc-gen-gogo. DO NOT EDIT.
|
||||
// source: k8s.io/kubernetes/vendor/k8s.io/api/events/v1beta1/generated.proto
|
||||
// DO NOT EDIT!
|
||||
|
||||
/*
|
||||
Package v1beta1 is a generated protocol buffer package.
|
||||
|
@ -254,24 +253,6 @@ func (m *EventSeries) MarshalTo(dAtA []byte) (int, error) {
|
|||
return i, nil
|
||||
}
|
||||
|
||||
func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int {
|
||||
dAtA[offset] = uint8(v)
|
||||
dAtA[offset+1] = uint8(v >> 8)
|
||||
dAtA[offset+2] = uint8(v >> 16)
|
||||
dAtA[offset+3] = uint8(v >> 24)
|
||||
dAtA[offset+4] = uint8(v >> 32)
|
||||
dAtA[offset+5] = uint8(v >> 40)
|
||||
dAtA[offset+6] = uint8(v >> 48)
|
||||
dAtA[offset+7] = uint8(v >> 56)
|
||||
return offset + 8
|
||||
}
|
||||
func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int {
|
||||
dAtA[offset] = uint8(v)
|
||||
dAtA[offset+1] = uint8(v >> 8)
|
||||
dAtA[offset+2] = uint8(v >> 16)
|
||||
dAtA[offset+3] = uint8(v >> 24)
|
||||
return offset + 4
|
||||
}
|
||||
func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
|
||||
for v >= 1<<7 {
|
||||
dAtA[offset] = uint8(v&0x7f | 0x80)
|
||||
|
|
2056 changes: vendor/k8s.io/api/extensions/v1beta1/generated.pb.go (generated, vendored; diff suppressed because it is too large)
56 changes: vendor/k8s.io/api/extensions/v1beta1/generated.proto (generated, vendored)
|
@ -22,7 +22,6 @@ syntax = 'proto2';
|
|||
package k8s.io.api.extensions.v1beta1;
|
||||
|
||||
import "k8s.io/api/core/v1/generated.proto";
|
||||
import "k8s.io/apimachinery/pkg/api/resource/generated.proto";
|
||||
import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
|
||||
import "k8s.io/apimachinery/pkg/runtime/generated.proto";
|
||||
import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
|
||||
|
@ -45,7 +44,7 @@ message AllowedHostPath {
|
|||
// pathPrefix is the path prefix that the host volume must match.
|
||||
// It does not support `*`.
|
||||
// Trailing slashes are trimmed when validating the path prefix with a host path.
|
||||
//
|
||||
//
|
||||
// Examples:
|
||||
// `/foo` would allow `/foo`, `/foo/` and `/foo/bar`
|
||||
// `/foo` would not allow `/food` or `/etc/foo`
|
||||
|
@ -56,31 +55,6 @@ message AllowedHostPath {
|
|||
optional bool readOnly = 2;
|
||||
}
|
||||
|
||||
message CustomMetricCurrentStatus {
|
||||
// Custom Metric name.
|
||||
optional string name = 1;
|
||||
|
||||
// Custom Metric value (average).
|
||||
optional k8s.io.apimachinery.pkg.api.resource.Quantity value = 2;
|
||||
}
|
||||
|
||||
message CustomMetricCurrentStatusList {
|
||||
repeated CustomMetricCurrentStatus items = 1;
|
||||
}
|
||||
|
||||
// Alpha-level support for Custom Metrics in HPA (as annotations).
|
||||
message CustomMetricTarget {
|
||||
// Custom Metric name.
|
||||
optional string name = 1;
|
||||
|
||||
// Custom Metric value (average).
|
||||
optional k8s.io.apimachinery.pkg.api.resource.Quantity value = 2;
|
||||
}
|
||||
|
||||
message CustomMetricTargetList {
|
||||
repeated CustomMetricTarget items = 1;
|
||||
}
|
||||
|
||||
// DEPRECATED - This group version of DaemonSet is deprecated by apps/v1beta2/DaemonSet. See the release notes for
|
||||
// more information.
|
||||
// DaemonSet represents the configuration of a daemon set.
|
||||
|
@ -335,6 +309,8 @@ message DeploymentSpec {
|
|||
|
||||
// The number of old ReplicaSets to retain to allow rollback.
|
||||
// This is a pointer to distinguish between explicit zero and not specified.
|
||||
// This is set to the max value of int32 (i.e. 2147483647) by default, which
|
||||
// means "retaining all old RelicaSets".
|
||||
// +optional
|
||||
optional int32 revisionHistoryLimit = 6;
|
||||
|
||||
|
@ -688,7 +664,7 @@ message NetworkPolicyList {
|
|||
message NetworkPolicyPeer {
|
||||
// This is a label selector which selects Pods. This field follows standard label
|
||||
// selector semantics; if present but empty, it selects all pods.
|
||||
//
|
||||
//
|
||||
// If NamespaceSelector is also set, then the NetworkPolicyPeer as a whole selects
|
||||
// the Pods matching PodSelector in the Namespaces selected by NamespaceSelector.
|
||||
// Otherwise it selects the Pods matching PodSelector in the policy's own Namespace.
|
||||
|
@ -697,7 +673,7 @@ message NetworkPolicyPeer {
|
|||
|
||||
// Selects Namespaces using cluster-scoped labels. This field follows standard label
|
||||
// selector semantics; if present but empty, it selects all namespaces.
|
||||
//
|
||||
//
|
||||
// If PodSelector is also set, then the NetworkPolicyPeer as a whole selects
|
||||
// the Pods matching PodSelector in the Namespaces selected by NamespaceSelector.
|
||||
// Otherwise it selects all Pods in the Namespaces selected by NamespaceSelector.
|
||||
|
@ -847,6 +823,12 @@ message PodSecurityPolicySpec {
|
|||
// runAsUser is the strategy that will dictate the allowable RunAsUser values that may be set.
|
||||
optional RunAsUserStrategyOptions runAsUser = 11;
|
||||
|
||||
// RunAsGroup is the strategy that will dictate the allowable RunAsGroup values that may be set.
|
||||
// If this field is omitted, the pod's RunAsGroup can take any value. This field requires the
|
||||
// RunAsGroup feature gate to be enabled.
|
||||
// +optional
|
||||
optional RunAsGroupStrategyOptions runAsGroup = 22;
|
||||
|
||||
// supplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext.
|
||||
optional SupplementalGroupsStrategyOptions supplementalGroups = 12;
|
||||
|
||||
|
@ -886,7 +868,7 @@ message PodSecurityPolicySpec {
|
|||
// Each entry is either a plain sysctl name or ends in "*" in which case it is considered
|
||||
// as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed.
|
||||
// Kubelet has to whitelist all allowed unsafe sysctls explicitly to avoid rejection.
|
||||
//
|
||||
//
|
||||
// Examples:
|
||||
// e.g. "foo/*" allows "foo/bar", "foo/baz", etc.
|
||||
// e.g. "foo.*" allows "foo.bar", "foo.baz", etc.
|
||||
|
@ -896,7 +878,7 @@ message PodSecurityPolicySpec {
|
|||
// forbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none.
|
||||
// Each entry is either a plain sysctl name or ends in "*" in which case it is considered
|
||||
// as a prefix of forbidden sysctls. Single * means all sysctls are forbidden.
|
||||
//
|
||||
//
|
||||
// Examples:
|
||||
// e.g. "foo/*" forbids "foo/bar", "foo/baz", etc.
|
||||
// e.g. "foo.*" forbids "foo.bar", "foo.baz", etc.
|
||||
|
@ -1086,6 +1068,18 @@ message RollingUpdateDeployment {
|
|||
optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2;
|
||||
}
|
||||
|
||||
// RunAsGroupStrategyOptions defines the strategy type and any options used to create the strategy.
|
||||
// Deprecated: use RunAsGroupStrategyOptions from policy API Group instead.
|
||||
message RunAsGroupStrategyOptions {
|
||||
// rule is the strategy that will dictate the allowable RunAsGroup values that may be set.
|
||||
optional string rule = 1;
|
||||
|
||||
// ranges are the allowed ranges of gids that may be used. If you would like to force a single gid
|
||||
// then supply a single range with the same start and end. Required for MustRunAs.
|
||||
// +optional
|
||||
repeated IDRange ranges = 2;
|
||||
}
|
||||
|
||||
// RunAsUserStrategyOptions defines the strategy type and any options used to create the strategy.
|
||||
// Deprecated: use RunAsUserStrategyOptions from policy API Group instead.
|
||||
message RunAsUserStrategyOptions {
|
||||
|
|
61 changes: vendor/k8s.io/api/extensions/v1beta1/types.go (generated, vendored)
|
@ -19,7 +19,6 @@ package v1beta1
|
|||
import (
|
||||
appsv1beta1 "k8s.io/api/apps/v1beta1"
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
)
|
||||
|
@ -50,8 +49,6 @@ type ScaleStatus struct {
|
|||
TargetSelector string `json:"targetSelector,omitempty" protobuf:"bytes,3,opt,name=targetSelector"`
|
||||
}
|
||||
|
||||
// +genclient
|
||||
// +genclient:noVerbs
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
||||
// represents a scaling request for a resource.
|
||||
|
@ -77,29 +74,6 @@ type ReplicationControllerDummy struct {
|
|||
metav1.TypeMeta `json:",inline"`
|
||||
}
|
||||
|
||||
// Alpha-level support for Custom Metrics in HPA (as annotations).
|
||||
type CustomMetricTarget struct {
|
||||
// Custom Metric name.
|
||||
Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
|
||||
// Custom Metric value (average).
|
||||
TargetValue resource.Quantity `json:"value" protobuf:"bytes,2,opt,name=value"`
|
||||
}
|
||||
|
||||
type CustomMetricTargetList struct {
|
||||
Items []CustomMetricTarget `json:"items" protobuf:"bytes,1,rep,name=items"`
|
||||
}
|
||||
|
||||
type CustomMetricCurrentStatus struct {
|
||||
// Custom Metric name.
|
||||
Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
|
||||
// Custom Metric value (average).
|
||||
CurrentValue resource.Quantity `json:"value" protobuf:"bytes,2,opt,name=value"`
|
||||
}
|
||||
|
||||
type CustomMetricCurrentStatusList struct {
|
||||
Items []CustomMetricCurrentStatus `json:"items" protobuf:"bytes,1,rep,name=items"`
|
||||
}
|
||||
|
||||
// +genclient
|
||||
// +genclient:method=GetScale,verb=get,subresource=scale,result=Scale
|
||||
// +genclient:method=UpdateScale,verb=update,subresource=scale,input=Scale,result=Scale
|
||||
|
@ -151,6 +125,8 @@ type DeploymentSpec struct {
|
|||
|
||||
// The number of old ReplicaSets to retain to allow rollback.
|
||||
// This is a pointer to distinguish between explicit zero and not specified.
|
||||
// This is set to the max value of int32 (i.e. 2147483647) by default, which
|
||||
// means "retaining all old RelicaSets".
|
||||
// +optional
|
||||
RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty" protobuf:"varint,6,opt,name=revisionHistoryLimit"`
|
||||
|
||||
|
@ -918,6 +894,11 @@ type PodSecurityPolicySpec struct {
|
|||
SELinux SELinuxStrategyOptions `json:"seLinux" protobuf:"bytes,10,opt,name=seLinux"`
|
||||
// runAsUser is the strategy that will dictate the allowable RunAsUser values that may be set.
|
||||
RunAsUser RunAsUserStrategyOptions `json:"runAsUser" protobuf:"bytes,11,opt,name=runAsUser"`
|
||||
// RunAsGroup is the strategy that will dictate the allowable RunAsGroup values that may be set.
|
||||
// If this field is omitted, the pod's RunAsGroup can take any value. This field requires the
|
||||
// RunAsGroup feature gate to be enabled.
|
||||
// +optional
|
||||
RunAsGroup *RunAsGroupStrategyOptions `json:"runAsGroup,omitempty" protobuf:"bytes,22,opt,name=runAsGroup"`
|
||||
// supplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext.
|
||||
SupplementalGroups SupplementalGroupsStrategyOptions `json:"supplementalGroups" protobuf:"bytes,12,opt,name=supplementalGroups"`
|
||||
// fsGroup is the strategy that will dictate what fs group is used by the SecurityContext.
|
||||
|
@ -1072,6 +1053,17 @@ type RunAsUserStrategyOptions struct {
|
|||
Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"`
|
||||
}
|
||||
|
||||
// RunAsGroupStrategyOptions defines the strategy type and any options used to create the strategy.
// Deprecated: use RunAsGroupStrategyOptions from policy API Group instead.
type RunAsGroupStrategyOptions struct {
	// rule is the strategy that will dictate the allowable RunAsGroup values that may be set.
	Rule RunAsGroupStrategy `json:"rule" protobuf:"bytes,1,opt,name=rule,casttype=RunAsGroupStrategy"`
	// ranges are the allowed ranges of gids that may be used. If you would like to force a single gid
	// then supply a single range with the same start and end. Required for MustRunAs.
	// +optional
	Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"`
}
|
||||
|
||||
// IDRange provides a min/max of an allowed range of IDs.
|
||||
// Deprecated: use IDRange from policy API Group instead.
|
||||
type IDRange struct {
|
||||
|
@ -1098,6 +1090,23 @@ const (
|
|||
RunAsUserStrategyRunAsAny RunAsUserStrategy = "RunAsAny"
|
||||
)
|
||||
|
||||
// RunAsGroupStrategy denotes strategy types for generating RunAsGroup values for a
|
||||
// Security Context.
|
||||
// Deprecated: use RunAsGroupStrategy from policy API Group instead.
|
||||
type RunAsGroupStrategy string
|
||||
|
||||
const (
|
||||
// RunAsGroupStrategyMayRunAs means that container does not need to run with a particular gid.
|
||||
// However, when RunAsGroup are specified, they have to fall in the defined range.
|
||||
RunAsGroupStrategyMayRunAs RunAsGroupStrategy = "MayRunAs"
|
||||
// RunAsGroupStrategyMustRunAs means that container must run as a particular gid.
|
||||
// Deprecated: use RunAsGroupStrategyMustRunAs from policy API Group instead.
|
||||
RunAsGroupStrategyMustRunAs RunAsGroupStrategy = "MustRunAs"
|
||||
// RunAsGroupStrategyRunAsAny means that container may make requests for any gid.
|
||||
// Deprecated: use RunAsGroupStrategyRunAsAny from policy API Group instead.
|
||||
RunAsGroupStrategyRunAsAny RunAsGroupStrategy = "RunAsAny"
|
||||
)
|
||||
|
||||
// FSGroupStrategyOptions defines the strategy type and options used to create the strategy.
|
||||
// Deprecated: use FSGroupStrategyOptions from policy API Group instead.
|
||||
type FSGroupStrategyOptions struct {
|
||||
|
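Taken together, the new RunAsGroup field on PodSecurityPolicySpec, the RunAsGroupStrategyOptions type, and the RunAsGroupStrategy constants let an extensions/v1beta1 policy constrain container GIDs. A hedged sketch of a spec fragment using them follows; the GID range is an illustrative assumption and the other mandatory strategy fields are left at their zero values.

package example

import extensionsv1beta1 "k8s.io/api/extensions/v1beta1"

// A PodSecurityPolicySpec fragment pinning container GIDs to a range.
// Only the field added in this change is shown; a real policy also sets
// seLinux, runAsUser, supplementalGroups and fsGroup strategies.
var examplePSPSpec = extensionsv1beta1.PodSecurityPolicySpec{
	RunAsGroup: &extensionsv1beta1.RunAsGroupStrategyOptions{
		Rule: extensionsv1beta1.RunAsGroupStrategyMustRunAs,
		Ranges: []extensionsv1beta1.IDRange{
			{Min: 1000, Max: 2000},
		},
	},
}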
|
34 changes: vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go (generated, vendored)
|
@ -46,25 +46,6 @@ func (AllowedHostPath) SwaggerDoc() map[string]string {
|
|||
return map_AllowedHostPath
|
||||
}
|
||||
|
||||
var map_CustomMetricCurrentStatus = map[string]string{
|
||||
"name": "Custom Metric name.",
|
||||
"value": "Custom Metric value (average).",
|
||||
}
|
||||
|
||||
func (CustomMetricCurrentStatus) SwaggerDoc() map[string]string {
|
||||
return map_CustomMetricCurrentStatus
|
||||
}
|
||||
|
||||
var map_CustomMetricTarget = map[string]string{
|
||||
"": "Alpha-level support for Custom Metrics in HPA (as annotations).",
|
||||
"name": "Custom Metric name.",
|
||||
"value": "Custom Metric value (average).",
|
||||
}
|
||||
|
||||
func (CustomMetricTarget) SwaggerDoc() map[string]string {
|
||||
return map_CustomMetricTarget
|
||||
}
|
||||
|
||||
var map_DaemonSet = map[string]string{
|
||||
"": "DEPRECATED - This group version of DaemonSet is deprecated by apps/v1beta2/DaemonSet. See the release notes for more information. DaemonSet represents the configuration of a daemon set.",
|
||||
"metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
|
||||
|
@ -114,7 +95,7 @@ func (DaemonSetSpec) SwaggerDoc() map[string]string {
|
|||
}
|
||||
|
||||
var map_DaemonSetStatus = map[string]string{
|
||||
"": "DaemonSetStatus represents the current status of a daemon set.",
|
||||
"": "DaemonSetStatus represents the current status of a daemon set.",
|
||||
"currentNumberScheduled": "The number of nodes that are running at least 1 daemon pod and are supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/",
|
||||
"numberMisscheduled": "The number of nodes that are running the daemon pod, but are not supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/",
|
||||
"desiredNumberScheduled": "The total number of nodes that should be running the daemon pod (including nodes correctly running the daemon pod). More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/",
|
||||
|
@ -193,7 +174,7 @@ var map_DeploymentSpec = map[string]string{
|
|||
"template": "Template describes the pods that will be created.",
|
||||
"strategy": "The deployment strategy to use to replace existing pods with new ones.",
|
||||
"minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)",
|
||||
"revisionHistoryLimit": "The number of old ReplicaSets to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified.",
|
||||
"revisionHistoryLimit": "The number of old ReplicaSets to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified. This is set to the max value of int32 (i.e. 2147483647) by default, which means \"retaining all old RelicaSets\".",
|
||||
"paused": "Indicates that the deployment is paused and will not be processed by the deployment controller.",
|
||||
"rollbackTo": "DEPRECATED. The config this deployment is rolling back to. Will be cleared after rollback is done.",
|
||||
"progressDeadlineSeconds": "The maximum time in seconds for a deployment to make progress before it is considered to be failed. The deployment controller will continue to process failed deployments and a condition with a ProgressDeadlineExceeded reason will be surfaced in the deployment status. Note that progress will not be estimated during the time a deployment is paused. This is set to the max value of int32 (i.e. 2147483647) by default, which means \"no deadline\".",
|
||||
|
@ -472,6 +453,7 @@ var map_PodSecurityPolicySpec = map[string]string{
|
|||
"hostIPC": "hostIPC determines if the policy allows the use of HostIPC in the pod spec.",
|
||||
"seLinux": "seLinux is the strategy that will dictate the allowable labels that may be set.",
|
||||
"runAsUser": "runAsUser is the strategy that will dictate the allowable RunAsUser values that may be set.",
|
||||
"runAsGroup": "RunAsGroup is the strategy that will dictate the allowable RunAsGroup values that may be set. If this field is omitted, the pod's RunAsGroup can take any value. This field requires the RunAsGroup feature gate to be enabled.",
|
||||
"supplementalGroups": "supplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext.",
|
||||
"fsGroup": "fsGroup is the strategy that will dictate what fs group is used by the SecurityContext.",
|
||||
"readOnlyRootFilesystem": "readOnlyRootFilesystem when set to true will force containers to run with a read only root file system. If the container specifically requests to run with a non-read only root file system the PSP should deny the pod. If set to false the container may run with a read only root file system if it wishes but it will not be forced to.",
|
||||
|
@ -584,6 +566,16 @@ func (RollingUpdateDeployment) SwaggerDoc() map[string]string {
|
|||
return map_RollingUpdateDeployment
|
||||
}
|
||||
|
||||
var map_RunAsGroupStrategyOptions = map[string]string{
|
||||
"": "RunAsGroupStrategyOptions defines the strategy type and any options used to create the strategy. Deprecated: use RunAsGroupStrategyOptions from policy API Group instead.",
|
||||
"rule": "rule is the strategy that will dictate the allowable RunAsGroup values that may be set.",
|
||||
"ranges": "ranges are the allowed ranges of gids that may be used. If you would like to force a single gid then supply a single range with the same start and end. Required for MustRunAs.",
|
||||
}
|
||||
|
||||
func (RunAsGroupStrategyOptions) SwaggerDoc() map[string]string {
|
||||
return map_RunAsGroupStrategyOptions
|
||||
}
|
||||
|
||||
var map_RunAsUserStrategyOptions = map[string]string{
|
||||
"": "RunAsUserStrategyOptions defines the strategy type and any options used to create the strategy. Deprecated: use RunAsUserStrategyOptions from policy API Group instead.",
|
||||
"rule": "rule is the strategy that will dictate the allowable RunAsUser values that may be set.",
|
||||
|
|
106 changes: vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go (generated, vendored)
|
@ -59,86 +59,6 @@ func (in *AllowedHostPath) DeepCopy() *AllowedHostPath {
|
|||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *CustomMetricCurrentStatus) DeepCopyInto(out *CustomMetricCurrentStatus) {
|
||||
*out = *in
|
||||
out.CurrentValue = in.CurrentValue.DeepCopy()
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomMetricCurrentStatus.
|
||||
func (in *CustomMetricCurrentStatus) DeepCopy() *CustomMetricCurrentStatus {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(CustomMetricCurrentStatus)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *CustomMetricCurrentStatusList) DeepCopyInto(out *CustomMetricCurrentStatusList) {
|
||||
*out = *in
|
||||
if in.Items != nil {
|
||||
in, out := &in.Items, &out.Items
|
||||
*out = make([]CustomMetricCurrentStatus, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomMetricCurrentStatusList.
|
||||
func (in *CustomMetricCurrentStatusList) DeepCopy() *CustomMetricCurrentStatusList {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(CustomMetricCurrentStatusList)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *CustomMetricTarget) DeepCopyInto(out *CustomMetricTarget) {
|
||||
*out = *in
|
||||
out.TargetValue = in.TargetValue.DeepCopy()
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomMetricTarget.
|
||||
func (in *CustomMetricTarget) DeepCopy() *CustomMetricTarget {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(CustomMetricTarget)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *CustomMetricTargetList) DeepCopyInto(out *CustomMetricTargetList) {
|
||||
*out = *in
|
||||
if in.Items != nil {
|
||||
in, out := &in.Items, &out.Items
|
||||
*out = make([]CustomMetricTarget, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomMetricTargetList.
|
||||
func (in *CustomMetricTargetList) DeepCopy() *CustomMetricTargetList {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(CustomMetricTargetList)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *DaemonSet) DeepCopyInto(out *DaemonSet) {
|
||||
*out = *in
|
||||
|
@@ -1102,6 +1022,11 @@ func (in *PodSecurityPolicySpec) DeepCopyInto(out *PodSecurityPolicySpec) {
	}
	in.SELinux.DeepCopyInto(&out.SELinux)
	in.RunAsUser.DeepCopyInto(&out.RunAsUser)
	if in.RunAsGroup != nil {
		in, out := &in.RunAsGroup, &out.RunAsGroup
		*out = new(RunAsGroupStrategyOptions)
		(*in).DeepCopyInto(*out)
	}
	in.SupplementalGroups.DeepCopyInto(&out.SupplementalGroups)
	in.FSGroup.DeepCopyInto(&out.FSGroup)
	if in.DefaultAllowPrivilegeEscalation != nil {
@@ -1368,6 +1293,27 @@ func (in *RollingUpdateDeployment) DeepCopy() *RollingUpdateDeployment {
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RunAsGroupStrategyOptions) DeepCopyInto(out *RunAsGroupStrategyOptions) {
	*out = *in
	if in.Ranges != nil {
		in, out := &in.Ranges, &out.Ranges
		*out = make([]IDRange, len(*in))
		copy(*out, *in)
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunAsGroupStrategyOptions.
func (in *RunAsGroupStrategyOptions) DeepCopy() *RunAsGroupStrategyOptions {
	if in == nil {
		return nil
	}
	out := new(RunAsGroupStrategyOptions)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RunAsUserStrategyOptions) DeepCopyInto(out *RunAsUserStrategyOptions) {
	*out = *in
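The new generated methods above give RunAsGroupStrategyOptions value semantics when a PodSecurityPolicySpec is deep-copied: the RunAsGroup pointer is re-allocated and the Ranges slice is copied rather than shared. A small sketch of what that buys a caller; the named string type used for Rule and the IDRange field names (Min, Max) are assumed to match the policy/v1beta1 definitions that appear later in this change:

package main

import (
	"fmt"

	extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
)

func main() {
	spec := &extensionsv1beta1.PodSecurityPolicySpec{
		RunAsGroup: &extensionsv1beta1.RunAsGroupStrategyOptions{
			Rule:   extensionsv1beta1.RunAsGroupStrategy("MustRunAs"),
			Ranges: []extensionsv1beta1.IDRange{{Min: 1000, Max: 2000}},
		},
	}

	// DeepCopy routes through the generated DeepCopyInto above: the
	// RunAsGroup pointer is re-allocated and Ranges is copied, so the
	// copy can be mutated without touching the original.
	cp := spec.DeepCopy()
	cp.RunAsGroup.Ranges[0].Min = 0

	fmt.Println(spec.RunAsGroup.Ranges[0].Min) // still 1000
	fmt.Println(cp.RunAsGroup.Ranges[0].Min)   // 0
}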
1 vendor/k8s.io/api/networking/v1/doc.go generated vendored
@@ -17,4 +17,5 @@ limitations under the License.
// +k8s:deepcopy-gen=package
// +k8s:openapi-gen=true
// +groupName=networking.k8s.io

package v1 // import "k8s.io/api/networking/v1"
21 vendor/k8s.io/api/networking/v1/generated.pb.go generated vendored
@@ -14,9 +14,8 @@ See the License for the specific language governing permissions and
limitations under the License.
*/

// Code generated by protoc-gen-gogo.
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: k8s.io/kubernetes/vendor/k8s.io/api/networking/v1/generated.proto
// DO NOT EDIT!

/*
Package v1 is a generated protocol buffer package.
@ -446,24 +445,6 @@ func (m *NetworkPolicySpec) MarshalTo(dAtA []byte) (int, error) {
|
|||
return i, nil
|
||||
}
|
||||
|
||||
func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int {
|
||||
dAtA[offset] = uint8(v)
|
||||
dAtA[offset+1] = uint8(v >> 8)
|
||||
dAtA[offset+2] = uint8(v >> 16)
|
||||
dAtA[offset+3] = uint8(v >> 24)
|
||||
dAtA[offset+4] = uint8(v >> 32)
|
||||
dAtA[offset+5] = uint8(v >> 40)
|
||||
dAtA[offset+6] = uint8(v >> 48)
|
||||
dAtA[offset+7] = uint8(v >> 56)
|
||||
return offset + 8
|
||||
}
|
||||
func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int {
|
||||
dAtA[offset] = uint8(v)
|
||||
dAtA[offset+1] = uint8(v >> 8)
|
||||
dAtA[offset+2] = uint8(v >> 16)
|
||||
dAtA[offset+3] = uint8(v >> 24)
|
||||
return offset + 4
|
||||
}
|
||||
func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
|
||||
for v >= 1<<7 {
|
||||
dAtA[offset] = uint8(v&0x7f | 0x80)
|
||||
|
|
4 vendor/k8s.io/api/networking/v1/generated.proto generated vendored
@@ -114,7 +114,7 @@ message NetworkPolicyList {
message NetworkPolicyPeer {
  // This is a label selector which selects Pods. This field follows standard label
  // selector semantics; if present but empty, it selects all pods.
  //
  //
  // If NamespaceSelector is also set, then the NetworkPolicyPeer as a whole selects
  // the Pods matching PodSelector in the Namespaces selected by NamespaceSelector.
  // Otherwise it selects the Pods matching PodSelector in the policy's own Namespace.
@@ -123,7 +123,7 @@ message NetworkPolicyPeer {

  // Selects Namespaces using cluster-scoped labels. This field follows standard label
  // selector semantics; if present but empty, it selects all namespaces.
  //
  //
  // If PodSelector is also set, then the NetworkPolicyPeer as a whole selects
  // the Pods matching PodSelector in the Namespaces selected by NamespaceSelector.
  // Otherwise it selects all Pods in the Namespaces selected by NamespaceSelector.
2 vendor/k8s.io/api/policy/v1beta1/doc.go generated vendored
@@ -15,9 +15,9 @@ limitations under the License.
*/

// +k8s:deepcopy-gen=package
// +k8s:openapi-gen=true

// Package policy is for any kind of policy object. Suitable examples, even if
// they aren't all here, are PodDisruptionBudget, PodSecurityPolicy,
// NetworkPolicy, etc.
// +k8s:openapi-gen=true
package v1beta1 // import "k8s.io/api/policy/v1beta1"
624 vendor/k8s.io/api/policy/v1beta1/generated.pb.go generated vendored
@@ -14,9 +14,8 @@ See the License for the specific language governing permissions and
limitations under the License.
*/

// Code generated by protoc-gen-gogo.
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: k8s.io/kubernetes/vendor/k8s.io/api/policy/v1beta1/generated.proto
// DO NOT EDIT!

/*
Package v1beta1 is a generated protocol buffer package.
@@ -38,6 +37,7 @@ limitations under the License.
		PodSecurityPolicy
		PodSecurityPolicyList
		PodSecurityPolicySpec
		RunAsGroupStrategyOptions
		RunAsUserStrategyOptions
		SELinuxStrategyOptions
		SupplementalGroupsStrategyOptions
@@ -125,20 +125,26 @@ func (m *PodSecurityPolicySpec) Reset() { *m = PodSecurityPol
func (*PodSecurityPolicySpec) ProtoMessage() {}
func (*PodSecurityPolicySpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} }

func (m *RunAsGroupStrategyOptions) Reset() { *m = RunAsGroupStrategyOptions{} }
func (*RunAsGroupStrategyOptions) ProtoMessage() {}
func (*RunAsGroupStrategyOptions) Descriptor() ([]byte, []int) {
	return fileDescriptorGenerated, []int{13}
}

func (m *RunAsUserStrategyOptions) Reset() { *m = RunAsUserStrategyOptions{} }
func (*RunAsUserStrategyOptions) ProtoMessage() {}
func (*RunAsUserStrategyOptions) Descriptor() ([]byte, []int) {
	return fileDescriptorGenerated, []int{13}
	return fileDescriptorGenerated, []int{14}
}

func (m *SELinuxStrategyOptions) Reset() { *m = SELinuxStrategyOptions{} }
func (*SELinuxStrategyOptions) ProtoMessage() {}
func (*SELinuxStrategyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} }
func (*SELinuxStrategyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} }

func (m *SupplementalGroupsStrategyOptions) Reset() { *m = SupplementalGroupsStrategyOptions{} }
func (*SupplementalGroupsStrategyOptions) ProtoMessage() {}
func (*SupplementalGroupsStrategyOptions) Descriptor() ([]byte, []int) {
	return fileDescriptorGenerated, []int{15}
	return fileDescriptorGenerated, []int{16}
}

func init() {
@@ -155,6 +161,7 @@ func init() {
	proto.RegisterType((*PodSecurityPolicy)(nil), "k8s.io.api.policy.v1beta1.PodSecurityPolicy")
	proto.RegisterType((*PodSecurityPolicyList)(nil), "k8s.io.api.policy.v1beta1.PodSecurityPolicyList")
	proto.RegisterType((*PodSecurityPolicySpec)(nil), "k8s.io.api.policy.v1beta1.PodSecurityPolicySpec")
	proto.RegisterType((*RunAsGroupStrategyOptions)(nil), "k8s.io.api.policy.v1beta1.RunAsGroupStrategyOptions")
	proto.RegisterType((*RunAsUserStrategyOptions)(nil), "k8s.io.api.policy.v1beta1.RunAsUserStrategyOptions")
	proto.RegisterType((*SELinuxStrategyOptions)(nil), "k8s.io.api.policy.v1beta1.SELinuxStrategyOptions")
	proto.RegisterType((*SupplementalGroupsStrategyOptions)(nil), "k8s.io.api.policy.v1beta1.SupplementalGroupsStrategyOptions")
@ -853,6 +860,52 @@ func (m *PodSecurityPolicySpec) MarshalTo(dAtA []byte) (int, error) {
|
|||
i += copy(dAtA[i:], s)
|
||||
}
|
||||
}
|
||||
if m.RunAsGroup != nil {
|
||||
dAtA[i] = 0xb2
|
||||
i++
|
||||
dAtA[i] = 0x1
|
||||
i++
|
||||
i = encodeVarintGenerated(dAtA, i, uint64(m.RunAsGroup.Size()))
|
||||
n18, err := m.RunAsGroup.MarshalTo(dAtA[i:])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
i += n18
|
||||
}
|
||||
return i, nil
|
||||
}
|
||||
|
||||
func (m *RunAsGroupStrategyOptions) Marshal() (dAtA []byte, err error) {
|
||||
size := m.Size()
|
||||
dAtA = make([]byte, size)
|
||||
n, err := m.MarshalTo(dAtA)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return dAtA[:n], nil
|
||||
}
|
||||
|
||||
func (m *RunAsGroupStrategyOptions) MarshalTo(dAtA []byte) (int, error) {
|
||||
var i int
|
||||
_ = i
|
||||
var l int
|
||||
_ = l
|
||||
dAtA[i] = 0xa
|
||||
i++
|
||||
i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule)))
|
||||
i += copy(dAtA[i:], m.Rule)
|
||||
if len(m.Ranges) > 0 {
|
||||
for _, msg := range m.Ranges {
|
||||
dAtA[i] = 0x12
|
||||
i++
|
||||
i = encodeVarintGenerated(dAtA, i, uint64(msg.Size()))
|
||||
n, err := msg.MarshalTo(dAtA[i:])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
i += n
|
||||
}
|
||||
}
|
||||
return i, nil
|
||||
}
|
||||
|
||||
|
@ -913,11 +966,11 @@ func (m *SELinuxStrategyOptions) MarshalTo(dAtA []byte) (int, error) {
|
|||
dAtA[i] = 0x12
|
||||
i++
|
||||
i = encodeVarintGenerated(dAtA, i, uint64(m.SELinuxOptions.Size()))
|
||||
n18, err := m.SELinuxOptions.MarshalTo(dAtA[i:])
|
||||
n19, err := m.SELinuxOptions.MarshalTo(dAtA[i:])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
i += n18
|
||||
i += n19
|
||||
}
|
||||
return i, nil
|
||||
}
|
||||
|
@ -956,24 +1009,6 @@ func (m *SupplementalGroupsStrategyOptions) MarshalTo(dAtA []byte) (int, error)
|
|||
return i, nil
|
||||
}
|
||||
|
||||
func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int {
|
||||
dAtA[offset] = uint8(v)
|
||||
dAtA[offset+1] = uint8(v >> 8)
|
||||
dAtA[offset+2] = uint8(v >> 16)
|
||||
dAtA[offset+3] = uint8(v >> 24)
|
||||
dAtA[offset+4] = uint8(v >> 32)
|
||||
dAtA[offset+5] = uint8(v >> 40)
|
||||
dAtA[offset+6] = uint8(v >> 48)
|
||||
dAtA[offset+7] = uint8(v >> 56)
|
||||
return offset + 8
|
||||
}
|
||||
func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int {
|
||||
dAtA[offset] = uint8(v)
|
||||
dAtA[offset+1] = uint8(v >> 8)
|
||||
dAtA[offset+2] = uint8(v >> 16)
|
||||
dAtA[offset+3] = uint8(v >> 24)
|
||||
return offset + 4
|
||||
}
|
||||
func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
|
||||
for v >= 1<<7 {
|
||||
dAtA[offset] = uint8(v&0x7f | 0x80)
|
||||
|
@ -1212,6 +1247,24 @@ func (m *PodSecurityPolicySpec) Size() (n int) {
|
|||
n += 2 + l + sovGenerated(uint64(l))
|
||||
}
|
||||
}
|
||||
if m.RunAsGroup != nil {
|
||||
l = m.RunAsGroup.Size()
|
||||
n += 2 + l + sovGenerated(uint64(l))
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func (m *RunAsGroupStrategyOptions) Size() (n int) {
|
||||
var l int
|
||||
_ = l
|
||||
l = len(m.Rule)
|
||||
n += 1 + l + sovGenerated(uint64(l))
|
||||
if len(m.Ranges) > 0 {
|
||||
for _, e := range m.Ranges {
|
||||
l = e.Size()
|
||||
n += 1 + l + sovGenerated(uint64(l))
|
||||
}
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
|
@ -1441,6 +1494,18 @@ func (this *PodSecurityPolicySpec) String() string {
|
|||
`AllowedUnsafeSysctls:` + fmt.Sprintf("%v", this.AllowedUnsafeSysctls) + `,`,
|
||||
`ForbiddenSysctls:` + fmt.Sprintf("%v", this.ForbiddenSysctls) + `,`,
|
||||
`AllowedProcMountTypes:` + fmt.Sprintf("%v", this.AllowedProcMountTypes) + `,`,
|
||||
`RunAsGroup:` + strings.Replace(fmt.Sprintf("%v", this.RunAsGroup), "RunAsGroupStrategyOptions", "RunAsGroupStrategyOptions", 1) + `,`,
|
||||
`}`,
|
||||
}, "")
|
||||
return s
|
||||
}
|
||||
func (this *RunAsGroupStrategyOptions) String() string {
|
||||
if this == nil {
|
||||
return "nil"
|
||||
}
|
||||
s := strings.Join([]string{`&RunAsGroupStrategyOptions{`,
|
||||
`Rule:` + fmt.Sprintf("%v", this.Rule) + `,`,
|
||||
`Ranges:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ranges), "IDRange", "IDRange", 1), `&`, ``, 1) + `,`,
|
||||
`}`,
|
||||
}, "")
|
||||
return s
|
||||
|
@ -2537,51 +2602,14 @@ func (m *PodDisruptionBudgetStatus) Unmarshal(dAtA []byte) error {
|
|||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
var keykey uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowGenerated
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
keykey |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
var stringLenmapkey uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowGenerated
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLenmapkey |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
intStringLenmapkey := int(stringLenmapkey)
|
||||
if intStringLenmapkey < 0 {
|
||||
return ErrInvalidLengthGenerated
|
||||
}
|
||||
postStringIndexmapkey := iNdEx + intStringLenmapkey
|
||||
if postStringIndexmapkey > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
mapkey := string(dAtA[iNdEx:postStringIndexmapkey])
|
||||
iNdEx = postStringIndexmapkey
|
||||
if m.DisruptedPods == nil {
|
||||
m.DisruptedPods = make(map[string]k8s_io_apimachinery_pkg_apis_meta_v1.Time)
|
||||
}
|
||||
if iNdEx < postIndex {
|
||||
var valuekey uint64
|
||||
var mapkey string
|
||||
mapvalue := &k8s_io_apimachinery_pkg_apis_meta_v1.Time{}
|
||||
for iNdEx < postIndex {
|
||||
entryPreIndex := iNdEx
|
||||
var wire uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowGenerated
|
||||
|
@ -2591,46 +2619,85 @@ func (m *PodDisruptionBudgetStatus) Unmarshal(dAtA []byte) error {
|
|||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
valuekey |= (uint64(b) & 0x7F) << shift
|
||||
wire |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
var mapmsglen int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowGenerated
|
||||
fieldNum := int32(wire >> 3)
|
||||
if fieldNum == 1 {
|
||||
var stringLenmapkey uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowGenerated
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLenmapkey |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if iNdEx >= l {
|
||||
intStringLenmapkey := int(stringLenmapkey)
|
||||
if intStringLenmapkey < 0 {
|
||||
return ErrInvalidLengthGenerated
|
||||
}
|
||||
postStringIndexmapkey := iNdEx + intStringLenmapkey
|
||||
if postStringIndexmapkey > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
mapmsglen |= (int(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
|
||||
iNdEx = postStringIndexmapkey
|
||||
} else if fieldNum == 2 {
|
||||
var mapmsglen int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowGenerated
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
mapmsglen |= (int(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if mapmsglen < 0 {
|
||||
return ErrInvalidLengthGenerated
|
||||
}
|
||||
postmsgIndex := iNdEx + mapmsglen
|
||||
if mapmsglen < 0 {
|
||||
return ErrInvalidLengthGenerated
|
||||
}
|
||||
if postmsgIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
mapvalue = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{}
|
||||
if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
|
||||
return err
|
||||
}
|
||||
iNdEx = postmsgIndex
|
||||
} else {
|
||||
iNdEx = entryPreIndex
|
||||
skippy, err := skipGenerated(dAtA[iNdEx:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if skippy < 0 {
|
||||
return ErrInvalidLengthGenerated
|
||||
}
|
||||
if (iNdEx + skippy) > postIndex {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
iNdEx += skippy
|
||||
}
|
||||
if mapmsglen < 0 {
|
||||
return ErrInvalidLengthGenerated
|
||||
}
|
||||
postmsgIndex := iNdEx + mapmsglen
|
||||
if mapmsglen < 0 {
|
||||
return ErrInvalidLengthGenerated
|
||||
}
|
||||
if postmsgIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
mapvalue := &k8s_io_apimachinery_pkg_apis_meta_v1.Time{}
|
||||
if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
|
||||
return err
|
||||
}
|
||||
iNdEx = postmsgIndex
|
||||
m.DisruptedPods[mapkey] = *mapvalue
|
||||
} else {
|
||||
var mapvalue k8s_io_apimachinery_pkg_apis_meta_v1.Time
|
||||
m.DisruptedPods[mapkey] = mapvalue
|
||||
}
|
||||
m.DisruptedPods[mapkey] = *mapvalue
|
||||
iNdEx = postIndex
|
||||
case 3:
|
||||
if wireType != 0 {
|
||||
|
@ -3537,6 +3604,149 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error {
|
|||
}
|
||||
m.AllowedProcMountTypes = append(m.AllowedProcMountTypes, k8s_io_api_core_v1.ProcMountType(dAtA[iNdEx:postIndex]))
|
||||
iNdEx = postIndex
|
||||
case 22:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field RunAsGroup", wireType)
|
||||
}
|
||||
var msglen int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowGenerated
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
msglen |= (int(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if msglen < 0 {
|
||||
return ErrInvalidLengthGenerated
|
||||
}
|
||||
postIndex := iNdEx + msglen
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
if m.RunAsGroup == nil {
|
||||
m.RunAsGroup = &RunAsGroupStrategyOptions{}
|
||||
}
|
||||
if err := m.RunAsGroup.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||
return err
|
||||
}
|
||||
iNdEx = postIndex
|
||||
default:
|
||||
iNdEx = preIndex
|
||||
skippy, err := skipGenerated(dAtA[iNdEx:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if skippy < 0 {
|
||||
return ErrInvalidLengthGenerated
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
iNdEx += skippy
|
||||
}
|
||||
}
|
||||
|
||||
if iNdEx > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func (m *RunAsGroupStrategyOptions) Unmarshal(dAtA []byte) error {
|
||||
l := len(dAtA)
|
||||
iNdEx := 0
|
||||
for iNdEx < l {
|
||||
preIndex := iNdEx
|
||||
var wire uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowGenerated
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
fieldNum := int32(wire >> 3)
|
||||
wireType := int(wire & 0x7)
|
||||
if wireType == 4 {
|
||||
return fmt.Errorf("proto: RunAsGroupStrategyOptions: wiretype end group for non-group")
|
||||
}
|
||||
if fieldNum <= 0 {
|
||||
return fmt.Errorf("proto: RunAsGroupStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||
}
|
||||
switch fieldNum {
|
||||
case 1:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType)
|
||||
}
|
||||
var stringLen uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowGenerated
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
intStringLen := int(stringLen)
|
||||
if intStringLen < 0 {
|
||||
return ErrInvalidLengthGenerated
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.Rule = RunAsGroupStrategy(dAtA[iNdEx:postIndex])
|
||||
iNdEx = postIndex
|
||||
case 2:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Ranges", wireType)
|
||||
}
|
||||
var msglen int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowGenerated
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
msglen |= (int(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if msglen < 0 {
|
||||
return ErrInvalidLengthGenerated
|
||||
}
|
||||
postIndex := iNdEx + msglen
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.Ranges = append(m.Ranges, IDRange{})
|
||||
if err := m.Ranges[len(m.Ranges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||
return err
|
||||
}
|
||||
iNdEx = postIndex
|
||||
default:
|
||||
iNdEx = preIndex
|
||||
skippy, err := skipGenerated(dAtA[iNdEx:])
|
||||
|
@ -4000,113 +4210,115 @@ func init() {
|
|||
}
|
||||
|
||||
var fileDescriptorGenerated = []byte{
|
||||
// 1715 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0x4f, 0x6f, 0x1b, 0xc7,
|
||||
0x15, 0xd7, 0x9a, 0x92, 0x48, 0x8d, 0x24, 0x5a, 0x1a, 0xfd, 0xe9, 0x46, 0xa8, 0xb9, 0x0e, 0x03,
|
||||
0x14, 0x6e, 0x90, 0x2c, 0x63, 0x39, 0x69, 0x8d, 0xa6, 0x2d, 0xa2, 0x35, 0x25, 0x5b, 0x81, 0x55,
|
||||
0xb1, 0x43, 0x3b, 0x68, 0x0b, 0xb7, 0xe8, 0x70, 0x77, 0x44, 0x4e, 0xb4, 0xdc, 0xdd, 0xce, 0xcc,
|
||||
0x32, 0xe4, 0xad, 0x87, 0x1e, 0x7a, 0xec, 0x17, 0xc8, 0x27, 0x28, 0x7a, 0xea, 0x97, 0x50, 0x81,
|
||||
0xa2, 0xc8, 0x31, 0xe8, 0x81, 0xa8, 0x59, 0xf4, 0x4b, 0xf8, 0xd2, 0x60, 0x87, 0xb3, 0x24, 0xf7,
|
||||
0x0f, 0x29, 0x2b, 0x40, 0x7c, 0xdb, 0x9d, 0xf7, 0xfb, 0xfd, 0xde, 0x9b, 0x37, 0x6f, 0xde, 0xce,
|
||||
0x0e, 0xb0, 0x2e, 0x1f, 0x72, 0x93, 0xfa, 0xb5, 0xcb, 0xb0, 0x45, 0x98, 0x47, 0x04, 0xe1, 0xb5,
|
||||
0x1e, 0xf1, 0x1c, 0x9f, 0xd5, 0x94, 0x01, 0x07, 0xb4, 0x16, 0xf8, 0x2e, 0xb5, 0x07, 0xb5, 0xde,
|
||||
0xfd, 0x16, 0x11, 0xf8, 0x7e, 0xad, 0x4d, 0x3c, 0xc2, 0xb0, 0x20, 0x8e, 0x19, 0x30, 0x5f, 0xf8,
|
||||
0xf0, 0xad, 0x31, 0xd4, 0xc4, 0x01, 0x35, 0xc7, 0x50, 0x53, 0x41, 0x0f, 0xde, 0x6f, 0x53, 0xd1,
|
||||
0x09, 0x5b, 0xa6, 0xed, 0x77, 0x6b, 0x6d, 0xbf, 0xed, 0xd7, 0x24, 0xa3, 0x15, 0x5e, 0xc8, 0x37,
|
||||
0xf9, 0x22, 0x9f, 0xc6, 0x4a, 0x07, 0xd5, 0x19, 0xa7, 0xb6, 0xcf, 0x48, 0xad, 0x97, 0xf1, 0x76,
|
||||
0xf0, 0xe1, 0x14, 0xd3, 0xc5, 0x76, 0x87, 0x7a, 0x84, 0x0d, 0x6a, 0xc1, 0x65, 0x3b, 0x1a, 0xe0,
|
||||
0xb5, 0x2e, 0x11, 0x38, 0x8f, 0x55, 0x9b, 0xc7, 0x62, 0xa1, 0x27, 0x68, 0x97, 0x64, 0x08, 0x3f,
|
||||
0xba, 0x8e, 0xc0, 0xed, 0x0e, 0xe9, 0xe2, 0x0c, 0xef, 0xc1, 0x3c, 0x5e, 0x28, 0xa8, 0x5b, 0xa3,
|
||||
0x9e, 0xe0, 0x82, 0xa5, 0x49, 0xd5, 0x8f, 0xc1, 0xf6, 0x91, 0xeb, 0xfa, 0x5f, 0x10, 0xe7, 0xc4,
|
||||
0x25, 0xfd, 0xcf, 0x7c, 0x37, 0xec, 0x12, 0xf8, 0x03, 0xb0, 0xea, 0x30, 0xda, 0x23, 0x4c, 0xd7,
|
||||
0xee, 0x6a, 0xf7, 0xd6, 0xac, 0xf2, 0xd5, 0xd0, 0x58, 0x1a, 0x0d, 0x8d, 0xd5, 0xba, 0x1c, 0x45,
|
||||
0xca, 0x5a, 0xe5, 0xe0, 0xb6, 0x22, 0x3f, 0xf1, 0xb9, 0x68, 0x60, 0xd1, 0x81, 0x87, 0x00, 0x04,
|
||||
0x58, 0x74, 0x1a, 0x8c, 0x5c, 0xd0, 0xbe, 0xa2, 0x43, 0x45, 0x07, 0x8d, 0x89, 0x05, 0xcd, 0xa0,
|
||||
0xe0, 0x7b, 0xa0, 0xc4, 0x08, 0x76, 0xce, 0x3d, 0x77, 0xa0, 0xdf, 0xba, 0xab, 0xdd, 0x2b, 0x59,
|
||||
0x5b, 0x8a, 0x51, 0x42, 0x6a, 0x1c, 0x4d, 0x10, 0xd5, 0x7f, 0x6b, 0xa0, 0x74, 0xdc, 0xa3, 0xb6,
|
||||
0xa0, 0xbe, 0x07, 0x7f, 0x0f, 0x4a, 0x51, 0xde, 0x1d, 0x2c, 0xb0, 0x74, 0xb6, 0x7e, 0xf8, 0x81,
|
||||
0x39, 0xad, 0x89, 0x49, 0x1a, 0xcc, 0xe0, 0xb2, 0x1d, 0x0d, 0x70, 0x33, 0x42, 0x9b, 0xbd, 0xfb,
|
||||
0xe6, 0x79, 0xeb, 0x73, 0x62, 0x8b, 0x33, 0x22, 0xf0, 0x34, 0xbc, 0xe9, 0x18, 0x9a, 0xa8, 0x42,
|
||||
0x17, 0x6c, 0x3a, 0xc4, 0x25, 0x82, 0x9c, 0x07, 0x91, 0x47, 0x2e, 0x23, 0x5c, 0x3f, 0x7c, 0xf0,
|
||||
0x7a, 0x6e, 0xea, 0xb3, 0x54, 0x6b, 0x7b, 0x34, 0x34, 0x36, 0x13, 0x43, 0x28, 0x29, 0x5e, 0xfd,
|
||||
0x52, 0x03, 0xfb, 0x27, 0xcd, 0xc7, 0xcc, 0x0f, 0x83, 0xa6, 0x88, 0xd6, 0xa9, 0x3d, 0x50, 0x26,
|
||||
0xf8, 0x63, 0xb0, 0xcc, 0x42, 0x97, 0xa8, 0x9c, 0xbe, 0xa3, 0x82, 0x5e, 0x46, 0xa1, 0x4b, 0x5e,
|
||||
0x0d, 0x8d, 0x9d, 0x14, 0xeb, 0xd9, 0x20, 0x20, 0x48, 0x12, 0xe0, 0xa7, 0x60, 0x95, 0x61, 0xaf,
|
||||
0x4d, 0xa2, 0xd0, 0x0b, 0xf7, 0xd6, 0x0f, 0xab, 0xe6, 0xdc, 0x5d, 0x63, 0x9e, 0xd6, 0x51, 0x04,
|
||||
0x9d, 0xae, 0xb8, 0x7c, 0xe5, 0x48, 0x29, 0x54, 0xcf, 0xc0, 0xa6, 0x5c, 0x6a, 0x9f, 0x09, 0x69,
|
||||
0x81, 0x77, 0x40, 0xa1, 0x4b, 0x3d, 0x19, 0xd4, 0x8a, 0xb5, 0xae, 0x58, 0x85, 0x33, 0xea, 0xa1,
|
||||
0x68, 0x5c, 0x9a, 0x71, 0x5f, 0xe6, 0x6c, 0xd6, 0x8c, 0xfb, 0x28, 0x1a, 0xaf, 0x3e, 0x06, 0x45,
|
||||
0xe5, 0x71, 0x56, 0xa8, 0xb0, 0x58, 0xa8, 0x90, 0x23, 0xf4, 0xd7, 0x5b, 0x60, 0xa7, 0xe1, 0x3b,
|
||||
0x75, 0xca, 0x59, 0x28, 0xf3, 0x65, 0x85, 0x4e, 0x9b, 0x88, 0x37, 0x50, 0x1f, 0xcf, 0xc0, 0x32,
|
||||
0x0f, 0x88, 0xad, 0xca, 0xe2, 0x70, 0x41, 0x6e, 0x73, 0xe2, 0x6b, 0x06, 0xc4, 0xb6, 0x36, 0xe2,
|
||||
0xa5, 0x8c, 0xde, 0x90, 0x54, 0x83, 0x2f, 0xc0, 0x2a, 0x17, 0x58, 0x84, 0x5c, 0x2f, 0x48, 0xdd,
|
||||
0x0f, 0x6f, 0xa8, 0x2b, 0xb9, 0xd3, 0x55, 0x1c, 0xbf, 0x23, 0xa5, 0x59, 0xfd, 0xa7, 0x06, 0xbe,
|
||||
0x97, 0xc3, 0x7a, 0x4a, 0xb9, 0x80, 0x2f, 0x32, 0x19, 0x33, 0x5f, 0x2f, 0x63, 0x11, 0x5b, 0xe6,
|
||||
0x6b, 0xb2, 0x79, 0xe3, 0x91, 0x99, 0x6c, 0x35, 0xc1, 0x0a, 0x15, 0xa4, 0x1b, 0x97, 0xa2, 0x79,
|
||||
0xb3, 0x69, 0x59, 0x9b, 0x4a, 0x7a, 0xe5, 0x34, 0x12, 0x41, 0x63, 0xad, 0xea, 0xbf, 0x6e, 0xe5,
|
||||
0x4e, 0x27, 0x4a, 0x27, 0xbc, 0x00, 0x1b, 0x5d, 0xea, 0x1d, 0xf5, 0x30, 0x75, 0x71, 0x4b, 0xed,
|
||||
0x9e, 0x45, 0x45, 0x10, 0xf5, 0x4a, 0x73, 0xdc, 0x2b, 0xcd, 0x53, 0x4f, 0x9c, 0xb3, 0xa6, 0x60,
|
||||
0xd4, 0x6b, 0x5b, 0x5b, 0xa3, 0xa1, 0xb1, 0x71, 0x36, 0xa3, 0x84, 0x12, 0xba, 0xf0, 0xb7, 0xa0,
|
||||
0xc4, 0x89, 0x4b, 0x6c, 0xe1, 0xb3, 0x9b, 0x75, 0x88, 0xa7, 0xb8, 0x45, 0xdc, 0xa6, 0xa2, 0x5a,
|
||||
0x1b, 0x51, 0xde, 0xe2, 0x37, 0x34, 0x91, 0x84, 0x2e, 0x28, 0x77, 0x71, 0xff, 0xb9, 0x87, 0x27,
|
||||
0x13, 0x29, 0x7c, 0xcb, 0x89, 0xc0, 0xd1, 0xd0, 0x28, 0x9f, 0x25, 0xb4, 0x50, 0x4a, 0xbb, 0xfa,
|
||||
0xbf, 0x65, 0xf0, 0xd6, 0xdc, 0xaa, 0x82, 0x9f, 0x02, 0xe8, 0xb7, 0x38, 0x61, 0x3d, 0xe2, 0x3c,
|
||||
0x1e, 0x7f, 0x4d, 0xa8, 0x1f, 0x6f, 0xdc, 0x03, 0xb5, 0x40, 0xf0, 0x3c, 0x83, 0x40, 0x39, 0x2c,
|
||||
0xf8, 0x27, 0x0d, 0x6c, 0x3a, 0x63, 0x37, 0xc4, 0x69, 0xf8, 0x4e, 0x5c, 0x18, 0x8f, 0xbf, 0x4d,
|
||||
0xbd, 0x9b, 0xf5, 0x59, 0xa5, 0x63, 0x4f, 0xb0, 0x81, 0xb5, 0xa7, 0x02, 0xda, 0x4c, 0xd8, 0x50,
|
||||
0xd2, 0x29, 0x3c, 0x03, 0xd0, 0x99, 0x48, 0x72, 0xf5, 0x4d, 0x93, 0x29, 0x5e, 0xb1, 0xee, 0x28,
|
||||
0x85, 0xbd, 0x84, 0xdf, 0x18, 0x84, 0x72, 0x88, 0xf0, 0xe7, 0xa0, 0x6c, 0x87, 0x8c, 0x11, 0x4f,
|
||||
0x3c, 0x21, 0xd8, 0x15, 0x9d, 0x81, 0xbe, 0x2c, 0xa5, 0xf6, 0x95, 0x54, 0xf9, 0x51, 0xc2, 0x8a,
|
||||
0x52, 0xe8, 0x88, 0xef, 0x10, 0x4e, 0x19, 0x71, 0x62, 0xfe, 0x4a, 0x92, 0x5f, 0x4f, 0x58, 0x51,
|
||||
0x0a, 0x0d, 0x1f, 0x82, 0x0d, 0xd2, 0x0f, 0x88, 0x1d, 0xe7, 0x74, 0x55, 0xb2, 0x77, 0x15, 0x7b,
|
||||
0xe3, 0x78, 0xc6, 0x86, 0x12, 0xc8, 0x03, 0x17, 0xc0, 0x6c, 0x12, 0xe1, 0x16, 0x28, 0x5c, 0x92,
|
||||
0xc1, 0xf8, 0xcb, 0x83, 0xa2, 0x47, 0xf8, 0x09, 0x58, 0xe9, 0x61, 0x37, 0x24, 0xaa, 0xd6, 0xdf,
|
||||
0x7d, 0xbd, 0x5a, 0x7f, 0x46, 0xbb, 0x04, 0x8d, 0x89, 0x3f, 0xb9, 0xf5, 0x50, 0xab, 0xfe, 0x43,
|
||||
0x03, 0xdb, 0x0d, 0xdf, 0x69, 0x12, 0x3b, 0x64, 0x54, 0x0c, 0x1a, 0x72, 0x9d, 0xdf, 0x40, 0xcf,
|
||||
0x46, 0x89, 0x9e, 0xfd, 0xc1, 0xe2, 0x5a, 0x4b, 0x46, 0x37, 0xaf, 0x63, 0x57, 0xaf, 0x34, 0xb0,
|
||||
0x97, 0x41, 0xbf, 0x81, 0x8e, 0xfa, 0xcb, 0x64, 0x47, 0x7d, 0xef, 0x26, 0x93, 0x99, 0xd3, 0x4f,
|
||||
0xff, 0x5f, 0xce, 0x99, 0x8a, 0xec, 0xa6, 0xd1, 0xe9, 0x8e, 0xd1, 0x1e, 0x75, 0x49, 0x9b, 0x38,
|
||||
0x72, 0x32, 0xa5, 0x99, 0xd3, 0xdd, 0xc4, 0x82, 0x66, 0x50, 0x90, 0x83, 0x7d, 0x87, 0x5c, 0xe0,
|
||||
0xd0, 0x15, 0x47, 0x8e, 0xf3, 0x08, 0x07, 0xb8, 0x45, 0x5d, 0x2a, 0xa8, 0x3a, 0x8e, 0xac, 0x59,
|
||||
0x1f, 0x8f, 0x86, 0xc6, 0x7e, 0x3d, 0x17, 0xf1, 0x6a, 0x68, 0xdc, 0xc9, 0x9e, 0xcb, 0xcd, 0x09,
|
||||
0x64, 0x80, 0xe6, 0x48, 0xc3, 0x01, 0xd0, 0x19, 0xf9, 0x43, 0x18, 0x6d, 0x8a, 0x3a, 0xf3, 0x83,
|
||||
0x84, 0xdb, 0x82, 0x74, 0xfb, 0xb3, 0xd1, 0xd0, 0xd0, 0xd1, 0x1c, 0xcc, 0xf5, 0x8e, 0xe7, 0xca,
|
||||
0xc3, 0xcf, 0xc1, 0x0e, 0x1e, 0xf7, 0x81, 0x84, 0xd7, 0x65, 0xe9, 0xf5, 0xe1, 0x68, 0x68, 0xec,
|
||||
0x1c, 0x65, 0xcd, 0xd7, 0x3b, 0xcc, 0x13, 0x85, 0x35, 0x50, 0xec, 0xc9, 0x23, 0x3b, 0xd7, 0x57,
|
||||
0xa4, 0xfe, 0xde, 0x68, 0x68, 0x14, 0xc7, 0xa7, 0xf8, 0x48, 0x73, 0xf5, 0xa4, 0x29, 0x0f, 0x82,
|
||||
0x31, 0x0a, 0x7e, 0x04, 0xd6, 0x3b, 0x3e, 0x17, 0xbf, 0x20, 0xe2, 0x0b, 0x9f, 0x5d, 0xca, 0xc6,
|
||||
0x50, 0xb2, 0x76, 0xd4, 0x0a, 0xae, 0x3f, 0x99, 0x9a, 0xd0, 0x2c, 0x0e, 0xfe, 0x1a, 0xac, 0x75,
|
||||
0xd4, 0xb1, 0x8f, 0xeb, 0x45, 0x59, 0x68, 0xf7, 0x16, 0x14, 0x5a, 0xe2, 0x88, 0x68, 0x6d, 0x2b,
|
||||
0xf9, 0xb5, 0x78, 0x98, 0xa3, 0xa9, 0x1a, 0xfc, 0x21, 0x28, 0xca, 0x97, 0xd3, 0xba, 0x5e, 0x92,
|
||||
0xd1, 0xdc, 0x56, 0xf0, 0xe2, 0x93, 0xf1, 0x30, 0x8a, 0xed, 0x31, 0xf4, 0xb4, 0xf1, 0x48, 0x5f,
|
||||
0xcb, 0x42, 0x4f, 0x1b, 0x8f, 0x50, 0x6c, 0x87, 0x2f, 0x40, 0x91, 0x93, 0xa7, 0xd4, 0x0b, 0xfb,
|
||||
0x3a, 0x90, 0x5b, 0xee, 0xfe, 0x82, 0x70, 0x9b, 0xc7, 0x12, 0x99, 0x3a, 0x70, 0x4f, 0xd5, 0x95,
|
||||
0x1d, 0xc5, 0x92, 0xd0, 0x01, 0x6b, 0x2c, 0xf4, 0x8e, 0xf8, 0x73, 0x4e, 0x98, 0xbe, 0x9e, 0xf9,
|
||||
0xda, 0xa7, 0xf5, 0x51, 0x8c, 0x4d, 0x7b, 0x98, 0x64, 0x66, 0x82, 0x40, 0x53, 0x61, 0xf8, 0x67,
|
||||
0x0d, 0x40, 0x1e, 0x06, 0x81, 0x4b, 0xba, 0xc4, 0x13, 0xd8, 0x95, 0xe7, 0x7b, 0xae, 0x6f, 0x48,
|
||||
0x7f, 0x3f, 0x5d, 0x34, 0x9f, 0x0c, 0x29, 0xed, 0x78, 0xf2, 0x99, 0xce, 0x42, 0x51, 0x8e, 0xcf,
|
||||
0x28, 0x9d, 0x17, 0x5c, 0x3e, 0xeb, 0x9b, 0xd7, 0xa6, 0x33, 0xff, 0xff, 0x65, 0x9a, 0x4e, 0x65,
|
||||
0x47, 0xb1, 0x24, 0xfc, 0x0c, 0xec, 0xc7, 0x7f, 0x77, 0xc8, 0xf7, 0xc5, 0x09, 0x75, 0x09, 0x1f,
|
||||
0x70, 0x41, 0xba, 0x7a, 0x59, 0x2e, 0x73, 0x45, 0x31, 0xf7, 0x51, 0x2e, 0x0a, 0xcd, 0x61, 0xc3,
|
||||
0x2e, 0x30, 0xe2, 0xf6, 0x10, 0xed, 0x9d, 0x49, 0x7f, 0x3a, 0xe6, 0x36, 0x76, 0xc7, 0xa7, 0x96,
|
||||
0xdb, 0xd2, 0xc1, 0x3b, 0xa3, 0xa1, 0x61, 0xd4, 0x17, 0x43, 0xd1, 0x75, 0x5a, 0xf0, 0x57, 0x40,
|
||||
0xc7, 0xf3, 0xfc, 0x6c, 0x49, 0x3f, 0xdf, 0x8f, 0x7a, 0xce, 0x5c, 0x07, 0x73, 0xd9, 0x30, 0x00,
|
||||
0x5b, 0x38, 0xf9, 0x9f, 0xcd, 0xf5, 0x6d, 0xb9, 0x0b, 0xdf, 0x5d, 0xb0, 0x0e, 0xa9, 0x5f, 0x73,
|
||||
0x4b, 0x57, 0x69, 0xdc, 0x4a, 0x19, 0x38, 0xca, 0xa8, 0xc3, 0x3e, 0x80, 0x38, 0x7d, 0x2d, 0xc0,
|
||||
0x75, 0x78, 0xed, 0x27, 0x26, 0x73, 0x97, 0x30, 0x2d, 0xb5, 0x8c, 0x89, 0xa3, 0x1c, 0x1f, 0xf0,
|
||||
0x29, 0xd8, 0x55, 0xa3, 0xcf, 0x3d, 0x8e, 0x2f, 0x48, 0x73, 0xc0, 0x6d, 0xe1, 0x72, 0x7d, 0x47,
|
||||
0xf6, 0x37, 0x7d, 0x34, 0x34, 0x76, 0x8f, 0x72, 0xec, 0x28, 0x97, 0x05, 0x3f, 0x01, 0x5b, 0x17,
|
||||
0x3e, 0x6b, 0x51, 0xc7, 0x21, 0x5e, 0xac, 0xb4, 0x2b, 0x95, 0x76, 0xa3, 0x4c, 0x9c, 0xa4, 0x6c,
|
||||
0x28, 0x83, 0x86, 0x1c, 0xec, 0x29, 0xe5, 0x06, 0xf3, 0xed, 0x33, 0x3f, 0xf4, 0x44, 0xd4, 0x52,
|
||||
0xb9, 0xbe, 0x37, 0xf9, 0x8c, 0xec, 0x1d, 0xe5, 0x01, 0x5e, 0x0d, 0x8d, 0xbb, 0x39, 0x2d, 0x3d,
|
||||
0x01, 0x42, 0xf9, 0xda, 0xd5, 0x2f, 0x35, 0xa0, 0xcf, 0xeb, 0x1a, 0xf0, 0xa3, 0xc4, 0x45, 0xc0,
|
||||
0xdb, 0xa9, 0x8b, 0x80, 0xed, 0x0c, 0xef, 0x3b, 0xb8, 0x06, 0xf8, 0x9b, 0x06, 0xf6, 0xf3, 0xbb,
|
||||
0x26, 0x7c, 0x90, 0x88, 0xce, 0x48, 0x45, 0x77, 0x3b, 0xc5, 0x52, 0xb1, 0xfd, 0x0e, 0x94, 0x55,
|
||||
0x6f, 0x4d, 0xde, 0xb2, 0x24, 0x62, 0x8c, 0x32, 0x18, 0x1d, 0x8b, 0x94, 0x44, 0xdc, 0x57, 0xe4,
|
||||
0x0f, 0x4d, 0x72, 0x0c, 0xa5, 0xd4, 0xaa, 0x7f, 0xd7, 0xc0, 0xdb, 0xd7, 0x76, 0x45, 0x68, 0x25,
|
||||
0x42, 0x37, 0x53, 0xa1, 0x57, 0xe6, 0x0b, 0x7c, 0x37, 0x97, 0x2d, 0xd6, 0xfb, 0x57, 0x2f, 0x2b,
|
||||
0x4b, 0x5f, 0xbd, 0xac, 0x2c, 0x7d, 0xfd, 0xb2, 0xb2, 0xf4, 0xc7, 0x51, 0x45, 0xbb, 0x1a, 0x55,
|
||||
0xb4, 0xaf, 0x46, 0x15, 0xed, 0xeb, 0x51, 0x45, 0xfb, 0xcf, 0xa8, 0xa2, 0xfd, 0xe5, 0xbf, 0x95,
|
||||
0xa5, 0xdf, 0x14, 0x95, 0xdc, 0x37, 0x01, 0x00, 0x00, 0xff, 0xff, 0xa8, 0xba, 0x23, 0xa4, 0x51,
|
||||
0x15, 0x00, 0x00,
|
||||
// 1756 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0xdd, 0x8e, 0xdb, 0xc6,
|
||||
0x15, 0x5e, 0x5a, 0xfb, 0xa3, 0x9d, 0xfd, 0x9f, 0xfd, 0x29, 0xbd, 0xa8, 0x45, 0x47, 0x01, 0x0a,
|
||||
0x37, 0x48, 0xa8, 0x78, 0x9d, 0xa4, 0x46, 0xd3, 0x16, 0x59, 0x5a, 0xbb, 0xf6, 0x06, 0xde, 0xae,
|
||||
0x3a, 0xb2, 0x83, 0xb6, 0x70, 0x8b, 0x8e, 0xc4, 0x59, 0xed, 0x64, 0x29, 0x92, 0x9d, 0x19, 0x2a,
|
||||
0xab, 0xbb, 0x5e, 0xf4, 0xa2, 0x97, 0x7d, 0x81, 0xa0, 0x0f, 0x50, 0xf4, 0xaa, 0x2f, 0xe1, 0x02,
|
||||
0x45, 0x91, 0xcb, 0xa0, 0x17, 0x42, 0xad, 0x22, 0x2f, 0xe1, 0xab, 0x80, 0xa3, 0x21, 0x25, 0xfe,
|
||||
0x49, 0x5e, 0x03, 0xf6, 0x1d, 0x39, 0xe7, 0xfb, 0xbe, 0x73, 0xe6, 0xcc, 0x99, 0x33, 0x43, 0x02,
|
||||
0xeb, 0xf2, 0x3e, 0x37, 0xa9, 0x57, 0xbb, 0x0c, 0x5a, 0x84, 0xb9, 0x44, 0x10, 0x5e, 0xeb, 0x11,
|
||||
0xd7, 0xf6, 0x58, 0x4d, 0x19, 0xb0, 0x4f, 0x6b, 0xbe, 0xe7, 0xd0, 0x76, 0xbf, 0xd6, 0xbb, 0xdb,
|
||||
0x22, 0x02, 0xdf, 0xad, 0x75, 0x88, 0x4b, 0x18, 0x16, 0xc4, 0x36, 0x7d, 0xe6, 0x09, 0x0f, 0xde,
|
||||
0x1c, 0x41, 0x4d, 0xec, 0x53, 0x73, 0x04, 0x35, 0x15, 0x74, 0xff, 0x83, 0x0e, 0x15, 0x17, 0x41,
|
||||
0xcb, 0x6c, 0x7b, 0xdd, 0x5a, 0xc7, 0xeb, 0x78, 0x35, 0xc9, 0x68, 0x05, 0xe7, 0xf2, 0x4d, 0xbe,
|
||||
0xc8, 0xa7, 0x91, 0xd2, 0x7e, 0x75, 0xc2, 0x69, 0xdb, 0x63, 0xa4, 0xd6, 0xcb, 0x78, 0xdb, 0xff,
|
||||
0x68, 0x8c, 0xe9, 0xe2, 0xf6, 0x05, 0x75, 0x09, 0xeb, 0xd7, 0xfc, 0xcb, 0x4e, 0x38, 0xc0, 0x6b,
|
||||
0x5d, 0x22, 0x70, 0x1e, 0xab, 0x56, 0xc4, 0x62, 0x81, 0x2b, 0x68, 0x97, 0x64, 0x08, 0x9f, 0xcc,
|
||||
0x22, 0xf0, 0xf6, 0x05, 0xe9, 0xe2, 0x0c, 0xef, 0x5e, 0x11, 0x2f, 0x10, 0xd4, 0xa9, 0x51, 0x57,
|
||||
0x70, 0xc1, 0xd2, 0xa4, 0xea, 0xa7, 0x60, 0xeb, 0xd0, 0x71, 0xbc, 0xaf, 0x88, 0x7d, 0xec, 0x90,
|
||||
0xab, 0x2f, 0x3c, 0x27, 0xe8, 0x12, 0xf8, 0x23, 0xb0, 0x68, 0x33, 0xda, 0x23, 0x4c, 0xd7, 0x6e,
|
||||
0x6b, 0x77, 0x96, 0xad, 0xf5, 0xe7, 0x03, 0x63, 0x6e, 0x38, 0x30, 0x16, 0xeb, 0x72, 0x14, 0x29,
|
||||
0x6b, 0x95, 0x83, 0x0d, 0x45, 0x7e, 0xe4, 0x71, 0xd1, 0xc0, 0xe2, 0x02, 0x1e, 0x00, 0xe0, 0x63,
|
||||
0x71, 0xd1, 0x60, 0xe4, 0x9c, 0x5e, 0x29, 0x3a, 0x54, 0x74, 0xd0, 0x88, 0x2d, 0x68, 0x02, 0x05,
|
||||
0xdf, 0x07, 0x65, 0x46, 0xb0, 0x7d, 0xe6, 0x3a, 0x7d, 0xfd, 0xc6, 0x6d, 0xed, 0x4e, 0xd9, 0xda,
|
||||
0x54, 0x8c, 0x32, 0x52, 0xe3, 0x28, 0x46, 0x54, 0xff, 0xab, 0x81, 0xf2, 0x51, 0x8f, 0xb6, 0x05,
|
||||
0xf5, 0x5c, 0xf8, 0x07, 0x50, 0x0e, 0xf3, 0x6e, 0x63, 0x81, 0xa5, 0xb3, 0x95, 0x83, 0x0f, 0xcd,
|
||||
0x71, 0x4d, 0xc4, 0x69, 0x30, 0xfd, 0xcb, 0x4e, 0x38, 0xc0, 0xcd, 0x10, 0x6d, 0xf6, 0xee, 0x9a,
|
||||
0x67, 0xad, 0x2f, 0x49, 0x5b, 0x9c, 0x12, 0x81, 0xc7, 0xe1, 0x8d, 0xc7, 0x50, 0xac, 0x0a, 0x1d,
|
||||
0xb0, 0x66, 0x13, 0x87, 0x08, 0x72, 0xe6, 0x87, 0x1e, 0xb9, 0x8c, 0x70, 0xe5, 0xe0, 0xde, 0xab,
|
||||
0xb9, 0xa9, 0x4f, 0x52, 0xad, 0xad, 0xe1, 0xc0, 0x58, 0x4b, 0x0c, 0xa1, 0xa4, 0x78, 0xf5, 0x6b,
|
||||
0x0d, 0xec, 0x1d, 0x37, 0x1f, 0x32, 0x2f, 0xf0, 0x9b, 0x22, 0x5c, 0xa7, 0x4e, 0x5f, 0x99, 0xe0,
|
||||
0x4f, 0xc0, 0x3c, 0x0b, 0x1c, 0xa2, 0x72, 0xfa, 0xae, 0x0a, 0x7a, 0x1e, 0x05, 0x0e, 0x79, 0x39,
|
||||
0x30, 0xb6, 0x53, 0xac, 0x27, 0x7d, 0x9f, 0x20, 0x49, 0x80, 0x9f, 0x83, 0x45, 0x86, 0xdd, 0x0e,
|
||||
0x09, 0x43, 0x2f, 0xdd, 0x59, 0x39, 0xa8, 0x9a, 0x85, 0xbb, 0xc6, 0x3c, 0xa9, 0xa3, 0x10, 0x3a,
|
||||
0x5e, 0x71, 0xf9, 0xca, 0x91, 0x52, 0xa8, 0x9e, 0x82, 0x35, 0xb9, 0xd4, 0x1e, 0x13, 0xd2, 0x02,
|
||||
0x6f, 0x81, 0x52, 0x97, 0xba, 0x32, 0xa8, 0x05, 0x6b, 0x45, 0xb1, 0x4a, 0xa7, 0xd4, 0x45, 0xe1,
|
||||
0xb8, 0x34, 0xe3, 0x2b, 0x99, 0xb3, 0x49, 0x33, 0xbe, 0x42, 0xe1, 0x78, 0xf5, 0x21, 0x58, 0x52,
|
||||
0x1e, 0x27, 0x85, 0x4a, 0xd3, 0x85, 0x4a, 0x39, 0x42, 0x7f, 0xbf, 0x01, 0xb6, 0x1b, 0x9e, 0x5d,
|
||||
0xa7, 0x9c, 0x05, 0x32, 0x5f, 0x56, 0x60, 0x77, 0x88, 0x78, 0x0b, 0xf5, 0xf1, 0x04, 0xcc, 0x73,
|
||||
0x9f, 0xb4, 0x55, 0x59, 0x1c, 0x4c, 0xc9, 0x6d, 0x4e, 0x7c, 0x4d, 0x9f, 0xb4, 0xad, 0xd5, 0x68,
|
||||
0x29, 0xc3, 0x37, 0x24, 0xd5, 0xe0, 0x33, 0xb0, 0xc8, 0x05, 0x16, 0x01, 0xd7, 0x4b, 0x52, 0xf7,
|
||||
0xa3, 0x6b, 0xea, 0x4a, 0xee, 0x78, 0x15, 0x47, 0xef, 0x48, 0x69, 0x56, 0xff, 0xad, 0x81, 0x1f,
|
||||
0xe4, 0xb0, 0x1e, 0x53, 0x2e, 0xe0, 0xb3, 0x4c, 0xc6, 0xcc, 0x57, 0xcb, 0x58, 0xc8, 0x96, 0xf9,
|
||||
0x8a, 0x37, 0x6f, 0x34, 0x32, 0x91, 0xad, 0x26, 0x58, 0xa0, 0x82, 0x74, 0xa3, 0x52, 0x34, 0xaf,
|
||||
0x37, 0x2d, 0x6b, 0x4d, 0x49, 0x2f, 0x9c, 0x84, 0x22, 0x68, 0xa4, 0x55, 0xfd, 0xcf, 0x8d, 0xdc,
|
||||
0xe9, 0x84, 0xe9, 0x84, 0xe7, 0x60, 0xb5, 0x4b, 0xdd, 0xc3, 0x1e, 0xa6, 0x0e, 0x6e, 0xa9, 0xdd,
|
||||
0x33, 0xad, 0x08, 0xc2, 0x5e, 0x69, 0x8e, 0x7a, 0xa5, 0x79, 0xe2, 0x8a, 0x33, 0xd6, 0x14, 0x8c,
|
||||
0xba, 0x1d, 0x6b, 0x73, 0x38, 0x30, 0x56, 0x4f, 0x27, 0x94, 0x50, 0x42, 0x17, 0xfe, 0x0e, 0x94,
|
||||
0x39, 0x71, 0x48, 0x5b, 0x78, 0xec, 0x7a, 0x1d, 0xe2, 0x31, 0x6e, 0x11, 0xa7, 0xa9, 0xa8, 0xd6,
|
||||
0x6a, 0x98, 0xb7, 0xe8, 0x0d, 0xc5, 0x92, 0xd0, 0x01, 0xeb, 0x5d, 0x7c, 0xf5, 0xd4, 0xc5, 0xf1,
|
||||
0x44, 0x4a, 0xaf, 0x39, 0x11, 0x38, 0x1c, 0x18, 0xeb, 0xa7, 0x09, 0x2d, 0x94, 0xd2, 0xae, 0x7e,
|
||||
0x37, 0x0f, 0x6e, 0x16, 0x56, 0x15, 0xfc, 0x1c, 0x40, 0xaf, 0xc5, 0x09, 0xeb, 0x11, 0xfb, 0xe1,
|
||||
0xe8, 0x34, 0xa1, 0x5e, 0xb4, 0x71, 0xf7, 0xd5, 0x02, 0xc1, 0xb3, 0x0c, 0x02, 0xe5, 0xb0, 0xe0,
|
||||
0x9f, 0x35, 0xb0, 0x66, 0x8f, 0xdc, 0x10, 0xbb, 0xe1, 0xd9, 0x51, 0x61, 0x3c, 0x7c, 0x9d, 0x7a,
|
||||
0x37, 0xeb, 0x93, 0x4a, 0x47, 0xae, 0x60, 0x7d, 0x6b, 0x57, 0x05, 0xb4, 0x96, 0xb0, 0xa1, 0xa4,
|
||||
0x53, 0x78, 0x0a, 0xa0, 0x1d, 0x4b, 0x72, 0x75, 0xa6, 0xc9, 0x14, 0x2f, 0x58, 0xb7, 0x94, 0xc2,
|
||||
0x6e, 0xc2, 0x6f, 0x04, 0x42, 0x39, 0x44, 0xf8, 0x0b, 0xb0, 0xde, 0x0e, 0x18, 0x23, 0xae, 0x78,
|
||||
0x44, 0xb0, 0x23, 0x2e, 0xfa, 0xfa, 0xbc, 0x94, 0xda, 0x53, 0x52, 0xeb, 0x0f, 0x12, 0x56, 0x94,
|
||||
0x42, 0x87, 0x7c, 0x9b, 0x70, 0xca, 0x88, 0x1d, 0xf1, 0x17, 0x92, 0xfc, 0x7a, 0xc2, 0x8a, 0x52,
|
||||
0x68, 0x78, 0x1f, 0xac, 0x92, 0x2b, 0x9f, 0xb4, 0xa3, 0x9c, 0x2e, 0x4a, 0xf6, 0x8e, 0x62, 0xaf,
|
||||
0x1e, 0x4d, 0xd8, 0x50, 0x02, 0xb9, 0xef, 0x00, 0x98, 0x4d, 0x22, 0xdc, 0x04, 0xa5, 0x4b, 0xd2,
|
||||
0x1f, 0x9d, 0x3c, 0x28, 0x7c, 0x84, 0x9f, 0x81, 0x85, 0x1e, 0x76, 0x02, 0xa2, 0x6a, 0xfd, 0xbd,
|
||||
0x57, 0xab, 0xf5, 0x27, 0xb4, 0x4b, 0xd0, 0x88, 0xf8, 0xd3, 0x1b, 0xf7, 0xb5, 0xea, 0xbf, 0x34,
|
||||
0xb0, 0xd5, 0xf0, 0xec, 0x26, 0x69, 0x07, 0x8c, 0x8a, 0x7e, 0x43, 0xae, 0xf3, 0x5b, 0xe8, 0xd9,
|
||||
0x28, 0xd1, 0xb3, 0x3f, 0x9c, 0x5e, 0x6b, 0xc9, 0xe8, 0x8a, 0x3a, 0x76, 0xf5, 0xb9, 0x06, 0x76,
|
||||
0x33, 0xe8, 0xb7, 0xd0, 0x51, 0x7f, 0x95, 0xec, 0xa8, 0xef, 0x5f, 0x67, 0x32, 0x05, 0xfd, 0xf4,
|
||||
0xbb, 0x8d, 0x9c, 0xa9, 0xc8, 0x6e, 0x1a, 0xde, 0xee, 0x18, 0xed, 0x51, 0x87, 0x74, 0x88, 0x2d,
|
||||
0x27, 0x53, 0x9e, 0xb8, 0xdd, 0xc5, 0x16, 0x34, 0x81, 0x82, 0x1c, 0xec, 0xd9, 0xe4, 0x1c, 0x07,
|
||||
0x8e, 0x38, 0xb4, 0xed, 0x07, 0xd8, 0xc7, 0x2d, 0xea, 0x50, 0x41, 0xd5, 0x75, 0x64, 0xd9, 0xfa,
|
||||
0x74, 0x38, 0x30, 0xf6, 0xea, 0xb9, 0x88, 0x97, 0x03, 0xe3, 0x56, 0xf6, 0x5e, 0x6e, 0xc6, 0x90,
|
||||
0x3e, 0x2a, 0x90, 0x86, 0x7d, 0xa0, 0x33, 0xf2, 0xc7, 0x20, 0xdc, 0x14, 0x75, 0xe6, 0xf9, 0x09,
|
||||
0xb7, 0x25, 0xe9, 0xf6, 0xe7, 0xc3, 0x81, 0xa1, 0xa3, 0x02, 0xcc, 0x6c, 0xc7, 0x85, 0xf2, 0xf0,
|
||||
0x4b, 0xb0, 0x8d, 0x47, 0x7d, 0x20, 0xe1, 0x75, 0x5e, 0x7a, 0xbd, 0x3f, 0x1c, 0x18, 0xdb, 0x87,
|
||||
0x59, 0xf3, 0x6c, 0x87, 0x79, 0xa2, 0xb0, 0x06, 0x96, 0x7a, 0xf2, 0xca, 0xce, 0xf5, 0x05, 0xa9,
|
||||
0xbf, 0x3b, 0x1c, 0x18, 0x4b, 0xa3, 0x5b, 0x7c, 0xa8, 0xb9, 0x78, 0xdc, 0x94, 0x17, 0xc1, 0x08,
|
||||
0x05, 0x3f, 0x06, 0x2b, 0x17, 0x1e, 0x17, 0xbf, 0x24, 0xe2, 0x2b, 0x8f, 0x5d, 0xca, 0xc6, 0x50,
|
||||
0xb6, 0xb6, 0xd5, 0x0a, 0xae, 0x3c, 0x1a, 0x9b, 0xd0, 0x24, 0x0e, 0xfe, 0x06, 0x2c, 0x5f, 0xa8,
|
||||
0x6b, 0x1f, 0xd7, 0x97, 0x64, 0xa1, 0xdd, 0x99, 0x52, 0x68, 0x89, 0x2b, 0xa2, 0xb5, 0xa5, 0xe4,
|
||||
0x97, 0xa3, 0x61, 0x8e, 0xc6, 0x6a, 0xf0, 0xc7, 0x60, 0x49, 0xbe, 0x9c, 0xd4, 0xf5, 0xb2, 0x8c,
|
||||
0x66, 0x43, 0xc1, 0x97, 0x1e, 0x8d, 0x86, 0x51, 0x64, 0x8f, 0xa0, 0x27, 0x8d, 0x07, 0xfa, 0x72,
|
||||
0x16, 0x7a, 0xd2, 0x78, 0x80, 0x22, 0x3b, 0x7c, 0x06, 0x96, 0x38, 0x79, 0x4c, 0xdd, 0xe0, 0x4a,
|
||||
0x07, 0x72, 0xcb, 0xdd, 0x9d, 0x12, 0x6e, 0xf3, 0x48, 0x22, 0x53, 0x17, 0xee, 0xb1, 0xba, 0xb2,
|
||||
0xa3, 0x48, 0x12, 0xda, 0x60, 0x99, 0x05, 0xee, 0x21, 0x7f, 0xca, 0x09, 0xd3, 0x57, 0x32, 0xa7,
|
||||
0x7d, 0x5a, 0x1f, 0x45, 0xd8, 0xb4, 0x87, 0x38, 0x33, 0x31, 0x02, 0x8d, 0x85, 0xe1, 0x5f, 0x34,
|
||||
0x00, 0x79, 0xe0, 0xfb, 0x0e, 0xe9, 0x12, 0x57, 0x60, 0x47, 0xde, 0xef, 0xb9, 0xbe, 0x2a, 0xfd,
|
||||
0xfd, 0x6c, 0xda, 0x7c, 0x32, 0xa4, 0xb4, 0xe3, 0xf8, 0x98, 0xce, 0x42, 0x51, 0x8e, 0xcf, 0x30,
|
||||
0x9d, 0xe7, 0x5c, 0x3e, 0xeb, 0x6b, 0x33, 0xd3, 0x99, 0xff, 0xfd, 0x32, 0x4e, 0xa7, 0xb2, 0xa3,
|
||||
0x48, 0x12, 0x7e, 0x01, 0xf6, 0xa2, 0xaf, 0x3b, 0xe4, 0x79, 0xe2, 0x98, 0x3a, 0x84, 0xf7, 0xb9,
|
||||
0x20, 0x5d, 0x7d, 0x5d, 0x2e, 0x73, 0x45, 0x31, 0xf7, 0x50, 0x2e, 0x0a, 0x15, 0xb0, 0x61, 0x17,
|
||||
0x18, 0x51, 0x7b, 0x08, 0xf7, 0x4e, 0xdc, 0x9f, 0x8e, 0x78, 0x1b, 0x3b, 0xa3, 0x5b, 0xcb, 0x86,
|
||||
0x74, 0xf0, 0xee, 0x70, 0x60, 0x18, 0xf5, 0xe9, 0x50, 0x34, 0x4b, 0x0b, 0xfe, 0x1a, 0xe8, 0xb8,
|
||||
0xc8, 0xcf, 0xa6, 0xf4, 0xf3, 0xc3, 0xb0, 0xe7, 0x14, 0x3a, 0x28, 0x64, 0x43, 0x1f, 0x6c, 0xe2,
|
||||
0xe4, 0x77, 0x36, 0xd7, 0xb7, 0xe4, 0x2e, 0x7c, 0x6f, 0xca, 0x3a, 0xa4, 0x3e, 0xcd, 0x2d, 0x5d,
|
||||
0xa5, 0x71, 0x33, 0x65, 0xe0, 0x28, 0xa3, 0x0e, 0xaf, 0x00, 0xc4, 0xe9, 0xdf, 0x02, 0x5c, 0x87,
|
||||
0x33, 0x8f, 0x98, 0xcc, 0xbf, 0x84, 0x71, 0xa9, 0x65, 0x4c, 0x1c, 0xe5, 0xf8, 0x80, 0x8f, 0xc1,
|
||||
0x8e, 0x1a, 0x7d, 0xea, 0x72, 0x7c, 0x4e, 0x9a, 0x7d, 0xde, 0x16, 0x0e, 0xd7, 0xb7, 0x65, 0x7f,
|
||||
0xd3, 0x87, 0x03, 0x63, 0xe7, 0x30, 0xc7, 0x8e, 0x72, 0x59, 0xf0, 0x33, 0xb0, 0x79, 0xee, 0xb1,
|
||||
0x16, 0xb5, 0x6d, 0xe2, 0x46, 0x4a, 0x3b, 0x52, 0x69, 0x27, 0xcc, 0xc4, 0x71, 0xca, 0x86, 0x32,
|
||||
0x68, 0xc8, 0xc1, 0xae, 0x52, 0x6e, 0x30, 0xaf, 0x7d, 0xea, 0x05, 0xae, 0x08, 0x5b, 0x2a, 0xd7,
|
||||
0x77, 0xe3, 0x63, 0x64, 0xf7, 0x30, 0x0f, 0xf0, 0x72, 0x60, 0xdc, 0xce, 0x69, 0xe9, 0x09, 0x10,
|
||||
0xca, 0xd7, 0x86, 0x36, 0x00, 0xb2, 0x0f, 0x8c, 0xb6, 0xdc, 0xde, 0xcc, 0x4f, 0x40, 0x14, 0x83,
|
||||
0xd3, 0xbb, 0x6e, 0x3d, 0x3c, 0x99, 0xc7, 0x66, 0x34, 0xa1, 0x5b, 0xfd, 0x9b, 0x06, 0x6e, 0x16,
|
||||
0x32, 0xe1, 0x27, 0x89, 0xff, 0x0d, 0xd5, 0xd4, 0xff, 0x06, 0x98, 0x25, 0xbe, 0x81, 0xdf, 0x0d,
|
||||
0x5f, 0x6b, 0x40, 0x2f, 0xea, 0x9e, 0xf0, 0xe3, 0x44, 0x80, 0xef, 0xa4, 0x02, 0xdc, 0xca, 0xf0,
|
||||
0xde, 0x40, 0x7c, 0xff, 0xd0, 0xc0, 0x5e, 0xfe, 0xe9, 0x01, 0xef, 0x25, 0xa2, 0x33, 0x52, 0xd1,
|
||||
0x6d, 0xa4, 0x58, 0x2a, 0xb6, 0xdf, 0x83, 0x75, 0x75, 0xc6, 0x24, 0xff, 0x36, 0x25, 0x62, 0x0c,
|
||||
0x2b, 0x29, 0xbc, 0x1e, 0x2a, 0x89, 0x68, 0xa5, 0xe5, 0x87, 0x5d, 0x72, 0x0c, 0xa5, 0xd4, 0xaa,
|
||||
0xff, 0xd4, 0xc0, 0x3b, 0x33, 0x4f, 0x07, 0x68, 0x25, 0x42, 0x37, 0x53, 0xa1, 0x57, 0x8a, 0x05,
|
||||
0xde, 0xcc, 0x4f, 0x27, 0xeb, 0x83, 0xe7, 0x2f, 0x2a, 0x73, 0xdf, 0xbc, 0xa8, 0xcc, 0x7d, 0xfb,
|
||||
0xa2, 0x32, 0xf7, 0xa7, 0x61, 0x45, 0x7b, 0x3e, 0xac, 0x68, 0xdf, 0x0c, 0x2b, 0xda, 0xb7, 0xc3,
|
||||
0x8a, 0xf6, 0xbf, 0x61, 0x45, 0xfb, 0xeb, 0xff, 0x2b, 0x73, 0xbf, 0x5d, 0x52, 0x72, 0xdf, 0x07,
|
||||
0x00, 0x00, 0xff, 0xff, 0x15, 0x2e, 0xf4, 0x72, 0x59, 0x16, 0x00, 0x00,
|
||||
}
|
||||
|
|
32 vendor/k8s.io/api/policy/v1beta1/generated.proto generated vendored
@@ -42,7 +42,7 @@ message AllowedHostPath {
  // pathPrefix is the path prefix that the host volume must match.
  // It does not support `*`.
  // Trailing slashes are trimmed when validating the path prefix with a host path.
  //
  //
  // Examples:
  // `/foo` would allow `/foo`, `/foo/` and `/foo/bar`
  // `/foo` would not allow `/food` or `/etc/foo`
@@ -58,9 +58,11 @@ message AllowedHostPath {
// created by POSTing to .../pods/<pod name>/evictions.
message Eviction {
  // ObjectMeta describes the pod that is being evicted.
  // +optional
  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;

  // DeleteOptions may be provided
  // +optional
  optional k8s.io.apimachinery.pkg.apis.meta.v1.DeleteOptions deleteOptions = 2;
}

@@ -97,17 +99,21 @@ message IDRange {

// PodDisruptionBudget is an object to define the max disruption that can be caused to a collection of pods
message PodDisruptionBudget {
  // +optional
  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;

  // Specification of the desired behavior of the PodDisruptionBudget.
  // +optional
  optional PodDisruptionBudgetSpec spec = 2;

  // Most recently observed status of the PodDisruptionBudget.
  // +optional
  optional PodDisruptionBudgetStatus status = 3;
}

// PodDisruptionBudgetList is a collection of PodDisruptionBudgets.
message PodDisruptionBudgetList {
  // +optional
  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;

  repeated PodDisruptionBudget items = 2;
@@ -119,16 +125,19 @@ message PodDisruptionBudgetSpec {
  // "selector" will still be available after the eviction, i.e. even in the
  // absence of the evicted pod. So for example you can prevent all voluntary
  // evictions by specifying "100%".
  // +optional
  optional k8s.io.apimachinery.pkg.util.intstr.IntOrString minAvailable = 1;

  // Label query over pods whose evictions are managed by the disruption
  // budget.
  // +optional
  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2;

  // An eviction is allowed if at most "maxUnavailable" pods selected by
  // "selector" are unavailable after the eviction, i.e. even in absence of
  // the evicted pod. For example, one can prevent all voluntary evictions
  // by specifying 0. This is a mutually exclusive setting with "minAvailable".
  // +optional
  optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 3;
}

@@ -242,6 +251,12 @@ message PodSecurityPolicySpec {
  // runAsUser is the strategy that will dictate the allowable RunAsUser values that may be set.
  optional RunAsUserStrategyOptions runAsUser = 11;

  // RunAsGroup is the strategy that will dictate the allowable RunAsGroup values that may be set.
  // If this field is omitted, the pod's RunAsGroup can take any value. This field requires the
  // RunAsGroup feature gate to be enabled.
  // +optional
  optional RunAsGroupStrategyOptions runAsGroup = 22;

  // supplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext.
  optional SupplementalGroupsStrategyOptions supplementalGroups = 12;

@@ -281,7 +296,7 @@ message PodSecurityPolicySpec {
  // Each entry is either a plain sysctl name or ends in "*" in which case it is considered
  // as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed.
  // Kubelet has to whitelist all allowed unsafe sysctls explicitly to avoid rejection.
  //
  //
  // Examples:
  // e.g. "foo/*" allows "foo/bar", "foo/baz", etc.
  // e.g. "foo.*" allows "foo.bar", "foo.baz", etc.
@@ -291,7 +306,7 @@ message PodSecurityPolicySpec {
  // forbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none.
  // Each entry is either a plain sysctl name or ends in "*" in which case it is considered
  // as a prefix of forbidden sysctls. Single * means all sysctls are forbidden.
  //
  //
  // Examples:
  // e.g. "foo/*" forbids "foo/bar", "foo/baz", etc.
  // e.g. "foo.*" forbids "foo.bar", "foo.baz", etc.
@@ -305,6 +320,17 @@ message PodSecurityPolicySpec {
  repeated string allowedProcMountTypes = 21;
}

// RunAsGroupStrategyOptions defines the strategy type and any options used to create the strategy.
message RunAsGroupStrategyOptions {
  // rule is the strategy that will dictate the allowable RunAsGroup values that may be set.
  optional string rule = 1;

  // ranges are the allowed ranges of gids that may be used. If you would like to force a single gid
  // then supply a single range with the same start and end. Required for MustRunAs.
  // +optional
  repeated IDRange ranges = 2;
}

// RunAsUserStrategyOptions defines the strategy type and any options used to create the strategy.
message RunAsUserStrategyOptions {
  // rule is the strategy that will dictate the allowable RunAsUser values that may be set.
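A side note on the new field number: runAsGroup = 22 means the regenerated marshaller writes the field key as the varint of (22<<3)|2, which is the byte pair 0xb2 0x01 that appears in the MarshalTo hunk earlier in this commit, followed by the length-prefixed RunAsGroupStrategyOptions message. A standalone sketch of that key computation, using no protobuf library:

package main

import "fmt"

func main() {
	const fieldNumber = 22
	const wireTypeLengthDelimited = 2

	// A protobuf field key is (fieldNumber << 3) | wireType, written as a varint.
	key := uint64(fieldNumber<<3 | wireTypeLengthDelimited)

	var buf []byte
	for key >= 0x80 {
		buf = append(buf, byte(key&0x7f)|0x80)
		key >>= 7
	}
	buf = append(buf, byte(key))

	fmt.Printf("% x\n", buf) // b2 01, the bytes the generated marshaller emits for runAsGroup
}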
57 vendor/k8s.io/api/policy/v1beta1/types.go generated vendored
@ -28,16 +28,19 @@ type PodDisruptionBudgetSpec struct {
|
|||
// "selector" will still be available after the eviction, i.e. even in the
|
||||
// absence of the evicted pod. So for example you can prevent all voluntary
|
||||
// evictions by specifying "100%".
|
||||
// +optional
|
||||
MinAvailable *intstr.IntOrString `json:"minAvailable,omitempty" protobuf:"bytes,1,opt,name=minAvailable"`
|
||||
|
||||
// Label query over pods whose evictions are managed by the disruption
|
||||
// budget.
|
||||
// +optional
|
||||
Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,2,opt,name=selector"`
|
||||
|
||||
// An eviction is allowed if at most "maxUnavailable" pods selected by
|
||||
// "selector" are unavailable after the eviction, i.e. even in absence of
|
||||
// the evicted pod. For example, one can prevent all voluntary evictions
|
||||
// by specifying 0. This is a mutually exclusive setting with "minAvailable".
|
||||
// +optional
|
||||
MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty" protobuf:"bytes,3,opt,name=maxUnavailable"`
|
||||
}
|
||||
|
||||
|
@ -81,12 +84,15 @@ type PodDisruptionBudgetStatus struct {
|
|||
|
||||
// PodDisruptionBudget is an object to define the max disruption that can be caused to a collection of pods
|
||||
type PodDisruptionBudget struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
// +optional
|
||||
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
|
||||
|
||||
// Specification of the desired behavior of the PodDisruptionBudget.
|
||||
// +optional
|
||||
Spec PodDisruptionBudgetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
|
||||
// Most recently observed status of the PodDisruptionBudget.
|
||||
// +optional
|
||||
Status PodDisruptionBudgetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
|
||||
}
|
||||
|
||||
|
@ -95,6 +101,7 @@ type PodDisruptionBudget struct {
|
|||
// PodDisruptionBudgetList is a collection of PodDisruptionBudgets.
|
||||
type PodDisruptionBudgetList struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
// +optional
|
||||
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
|
||||
Items []PodDisruptionBudget `json:"items" protobuf:"bytes,2,rep,name=items"`
|
||||
}
|
||||
|
@ -110,9 +117,11 @@ type Eviction struct {
|
|||
metav1.TypeMeta `json:",inline"`
|
||||
|
||||
// ObjectMeta describes the pod that is being evicted.
|
||||
// +optional
|
||||
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
|
||||
|
||||
// DeleteOptions may be provided
|
||||
// +optional
|
||||
DeleteOptions *metav1.DeleteOptions `json:"deleteOptions,omitempty" protobuf:"bytes,2,opt,name=deleteOptions"`
|
||||
}
|
||||
|
||||
|
@ -174,6 +183,11 @@ type PodSecurityPolicySpec struct {
|
|||
SELinux SELinuxStrategyOptions `json:"seLinux" protobuf:"bytes,10,opt,name=seLinux"`
|
||||
// runAsUser is the strategy that will dictate the allowable RunAsUser values that may be set.
|
||||
RunAsUser RunAsUserStrategyOptions `json:"runAsUser" protobuf:"bytes,11,opt,name=runAsUser"`
|
||||
// RunAsGroup is the strategy that will dictate the allowable RunAsGroup values that may be set.
|
||||
// If this field is omitted, the pod's RunAsGroup can take any value. This field requires the
|
||||
// RunAsGroup feature gate to be enabled.
|
||||
// +optional
|
||||
RunAsGroup *RunAsGroupStrategyOptions `json:"runAsGroup,omitempty" protobuf:"bytes,22,opt,name=runAsGroup"`
|
||||
// supplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext.
|
||||
SupplementalGroups SupplementalGroupsStrategyOptions `json:"supplementalGroups" protobuf:"bytes,12,opt,name=supplementalGroups"`
|
||||
// fsGroup is the strategy that will dictate what fs group is used by the SecurityContext.
|
||||
|
@ -245,6 +259,10 @@ type AllowedHostPath struct {
|
|||
ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,2,opt,name=readOnly"`
|
||||
}
|
||||
|
||||
// AllowAllCapabilities can be used as a value for the PodSecurityPolicy.AllowAllCapabilities
|
||||
// field and means that any capabilities are allowed to be requested.
|
||||
var AllowAllCapabilities v1.Capability = "*"
|
||||
|
||||
// FSType gives strong typing to different file systems that are used by volumes.
|
||||
type FSType string
|
||||
|
||||
|
@ -268,8 +286,15 @@ var (
|
|||
DownwardAPI FSType = "downwardAPI"
|
||||
FC FSType = "fc"
|
||||
ConfigMap FSType = "configMap"
|
||||
VsphereVolume FSType = "vsphereVolume"
|
||||
Quobyte FSType = "quobyte"
|
||||
AzureDisk FSType = "azureDisk"
|
||||
PhotonPersistentDisk FSType = "photonPersistentDisk"
|
||||
StorageOS FSType = "storageos"
|
||||
Projected FSType = "projected"
|
||||
PortworxVolume FSType = "portworxVolume"
|
||||
ScaleIO FSType = "scaleIO"
|
||||
CSI FSType = "csi"
|
||||
All FSType = "*"
|
||||
)
|
||||
|
||||
|
@@ -319,6 +344,16 @@ type RunAsUserStrategyOptions struct {
	Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"`
}

// RunAsGroupStrategyOptions defines the strategy type and any options used to create the strategy.
type RunAsGroupStrategyOptions struct {
	// rule is the strategy that will dictate the allowable RunAsGroup values that may be set.
	Rule RunAsGroupStrategy `json:"rule" protobuf:"bytes,1,opt,name=rule,casttype=RunAsGroupStrategy"`
	// ranges are the allowed ranges of gids that may be used. If you would like to force a single gid
	// then supply a single range with the same start and end. Required for MustRunAs.
	// +optional
	Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"`
}

// IDRange provides a min/max of an allowed range of IDs.
type IDRange struct {
	// min is the start of the range, inclusive.

@@ -340,6 +375,20 @@ const (
	RunAsUserStrategyRunAsAny RunAsUserStrategy = "RunAsAny"
)

// RunAsGroupStrategy denotes strategy types for generating RunAsGroup values for a
// Security Context.
type RunAsGroupStrategy string

const (
	// RunAsGroupStrategyMayRunAs means that container does not need to run with a particular gid.
	// However, when RunAsGroup are specified, they have to fall in the defined range.
	RunAsGroupStrategyMayRunAs RunAsGroupStrategy = "MayRunAs"
	// RunAsGroupStrategyMustRunAs means that container must run as a particular gid.
	RunAsGroupStrategyMustRunAs RunAsGroupStrategy = "MustRunAs"
	// RunAsGroupStrategyRunAsAny means that container may make requests for any gid.
	RunAsGroupStrategyRunAsAny RunAsGroupStrategy = "RunAsAny"
)

// FSGroupStrategyOptions defines the strategy type and options used to create the strategy.
type FSGroupStrategyOptions struct {
	// rule is the strategy that will dictate what FSGroup is used in the SecurityContext.
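Taken together, the new RunAsGroupStrategyOptions type and RunAsGroupStrategy constants mirror the existing RunAsUser options: a rule plus optional GID ranges. A minimal sketch of how a consumer of the vendored policy/v1beta1 package might construct the MustRunAs variant (the GID range is made up for the example):

package main

import (
	"fmt"

	policyv1beta1 "k8s.io/api/policy/v1beta1"
)

func main() {
	// Force container GIDs into a single allowed range (illustrative values).
	runAsGroup := &policyv1beta1.RunAsGroupStrategyOptions{
		Rule:   policyv1beta1.RunAsGroupStrategyMustRunAs,
		Ranges: []policyv1beta1.IDRange{{Min: 1000, Max: 2000}},
	}
	fmt.Printf("rule=%s ranges=%+v\n", runAsGroup.Rule, runAsGroup.Ranges)
}

A PodSecurityPolicySpec would carry such a value in its new optional RunAsGroup field, which is a pointer for exactly this reason.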
@@ -356,6 +405,9 @@ type FSGroupStrategyOptions struct {
type FSGroupStrategyType string

const (
	// FSGroupStrategyMayRunAs means that container does not need to have FSGroup of X applied.
	// However, when FSGroups are specified, they have to fall in the defined range.
	FSGroupStrategyMayRunAs FSGroupStrategyType = "MayRunAs"
	// FSGroupStrategyMustRunAs means that container must have FSGroup of X applied.
	FSGroupStrategyMustRunAs FSGroupStrategyType = "MustRunAs"
	// FSGroupStrategyRunAsAny means that container may make requests for any FSGroup labels.

@@ -378,6 +430,9 @@ type SupplementalGroupsStrategyOptions struct {
type SupplementalGroupsStrategyType string

const (
	// SupplementalGroupsStrategyMayRunAs means that container does not need to run with a particular gid.
	// However, when gids are specified, they have to fall in the defined range.
	SupplementalGroupsStrategyMayRunAs SupplementalGroupsStrategyType = "MayRunAs"
	// SupplementalGroupsStrategyMustRunAs means that container must run as a particular gid.
	SupplementalGroupsStrategyMustRunAs SupplementalGroupsStrategyType = "MustRunAs"
	// SupplementalGroupsStrategyRunAsAny means that container may make requests for any gid.
11 vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go generated vendored

@@ -162,6 +162,7 @@ var map_PodSecurityPolicySpec = map[string]string{
	"hostIPC":                "hostIPC determines if the policy allows the use of HostIPC in the pod spec.",
	"seLinux":                "seLinux is the strategy that will dictate the allowable labels that may be set.",
	"runAsUser":              "runAsUser is the strategy that will dictate the allowable RunAsUser values that may be set.",
	"runAsGroup":             "RunAsGroup is the strategy that will dictate the allowable RunAsGroup values that may be set. If this field is omitted, the pod's RunAsGroup can take any value. This field requires the RunAsGroup feature gate to be enabled.",
	"supplementalGroups":     "supplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext.",
	"fsGroup":                "fsGroup is the strategy that will dictate what fs group is used by the SecurityContext.",
	"readOnlyRootFilesystem": "readOnlyRootFilesystem when set to true will force containers to run with a read only root file system. If the container specifically requests to run with a non-read only root file system the PSP should deny the pod. If set to false the container may run with a read only root file system if it wishes but it will not be forced to.",

@@ -178,6 +179,16 @@ func (PodSecurityPolicySpec) SwaggerDoc() map[string]string {
	return map_PodSecurityPolicySpec
}

var map_RunAsGroupStrategyOptions = map[string]string{
	"":       "RunAsGroupStrategyOptions defines the strategy type and any options used to create the strategy.",
	"rule":   "rule is the strategy that will dictate the allowable RunAsGroup values that may be set.",
	"ranges": "ranges are the allowed ranges of gids that may be used. If you would like to force a single gid then supply a single range with the same start and end. Required for MustRunAs.",
}

func (RunAsGroupStrategyOptions) SwaggerDoc() map[string]string {
	return map_RunAsGroupStrategyOptions
}

var map_RunAsUserStrategyOptions = map[string]string{
	"":     "RunAsUserStrategyOptions defines the strategy type and any options used to create the strategy.",
	"rule": "rule is the strategy that will dictate the allowable RunAsUser values that may be set.",
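These generated maps back the SwaggerDoc() helpers, which key each field name to its documentation string; the empty key holds the type-level description. A small sketch, assuming the vendored package is imported under the alias shown, of reading the docs for the new type:

package main

import (
	"fmt"

	policyv1beta1 "k8s.io/api/policy/v1beta1"
)

func main() {
	// SwaggerDoc has a value receiver, so it can be called on a zero value.
	docs := policyv1beta1.RunAsGroupStrategyOptions{}.SwaggerDoc()
	fmt.Println(docs[""])     // type-level description
	fmt.Println(docs["rule"]) // per-field description
}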
26 vendor/k8s.io/api/policy/v1beta1/zz_generated.deepcopy.go generated vendored

@@ -348,6 +348,11 @@ func (in *PodSecurityPolicySpec) DeepCopyInto(out *PodSecurityPolicySpec) {
	}
	in.SELinux.DeepCopyInto(&out.SELinux)
	in.RunAsUser.DeepCopyInto(&out.RunAsUser)
	if in.RunAsGroup != nil {
		in, out := &in.RunAsGroup, &out.RunAsGroup
		*out = new(RunAsGroupStrategyOptions)
		(*in).DeepCopyInto(*out)
	}
	in.SupplementalGroups.DeepCopyInto(&out.SupplementalGroups)
	in.FSGroup.DeepCopyInto(&out.FSGroup)
	if in.DefaultAllowPrivilegeEscalation != nil {

@@ -398,6 +403,27 @@ func (in *PodSecurityPolicySpec) DeepCopy() *PodSecurityPolicySpec {
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RunAsGroupStrategyOptions) DeepCopyInto(out *RunAsGroupStrategyOptions) {
	*out = *in
	if in.Ranges != nil {
		in, out := &in.Ranges, &out.Ranges
		*out = make([]IDRange, len(*in))
		copy(*out, *in)
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunAsGroupStrategyOptions.
func (in *RunAsGroupStrategyOptions) DeepCopy() *RunAsGroupStrategyOptions {
	if in == nil {
		return nil
	}
	out := new(RunAsGroupStrategyOptions)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RunAsUserStrategyOptions) DeepCopyInto(out *RunAsUserStrategyOptions) {
	*out = *in
1 vendor/k8s.io/api/rbac/v1/doc.go generated vendored

@@ -18,4 +18,5 @@ limitations under the License.
// +k8s:openapi-gen=true

// +groupName=rbac.authorization.k8s.io

package v1 // import "k8s.io/api/rbac/v1"
21 vendor/k8s.io/api/rbac/v1/generated.pb.go generated vendored

@@ -14,9 +14,8 @@ See the License for the specific language governing permissions and
limitations under the License.
*/

// Code generated by protoc-gen-gogo.
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: k8s.io/kubernetes/vendor/k8s.io/api/rbac/v1/generated.proto
// DO NOT EDIT!

/*
Package v1 is a generated protocol buffer package.

@@ -641,24 +640,6 @@ func (m *Subject) MarshalTo(dAtA []byte) (int, error) {
	return i, nil
}

func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int {
	dAtA[offset] = uint8(v)
	dAtA[offset+1] = uint8(v >> 8)
	dAtA[offset+2] = uint8(v >> 16)
	dAtA[offset+3] = uint8(v >> 24)
	dAtA[offset+4] = uint8(v >> 32)
	dAtA[offset+5] = uint8(v >> 40)
	dAtA[offset+6] = uint8(v >> 48)
	dAtA[offset+7] = uint8(v >> 56)
	return offset + 8
}
func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int {
	dAtA[offset] = uint8(v)
	dAtA[offset+1] = uint8(v >> 8)
	dAtA[offset+2] = uint8(v >> 16)
	dAtA[offset+3] = uint8(v >> 24)
	return offset + 4
}
func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
	for v >= 1<<7 {
		dAtA[offset] = uint8(v&0x7f | 0x80)
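The dropped encodeFixed64Generated/encodeFixed32Generated helpers (the same deletion repeats in the other regenerated .pb.go files below) were plain little-endian byte writes that newer protoc-gen-gogo output no longer emits. As an equivalence sketch only, not code from this diff, the 64-bit variant behaves like encoding/binary; the function name here is just for illustration:

package main

import (
	"encoding/binary"
	"fmt"
)

// encodeFixed64 writes v little-endian at offset and advances by 8,
// matching what the deleted generated helper did byte by byte.
func encodeFixed64(dAtA []byte, offset int, v uint64) int {
	binary.LittleEndian.PutUint64(dAtA[offset:], v)
	return offset + 8
}

func main() {
	buf := make([]byte, 8)
	n := encodeFixed64(buf, 0, 0x0102030405060708)
	fmt.Printf("% x\n", buf[:n]) // 08 07 06 05 04 03 02 01
}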
2 vendor/k8s.io/api/rbac/v1/types_swagger_doc_generated.go generated vendored

@@ -28,7 +28,7 @@ package v1

// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
var map_AggregationRule = map[string]string{
	"":                     "AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole",
	"clusterRoleSelectors": "ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. If any of the selectors match, then the ClusterRole's permissions will be added",
}
1 vendor/k8s.io/api/rbac/v1alpha1/doc.go generated vendored

@@ -18,4 +18,5 @@ limitations under the License.
// +k8s:openapi-gen=true

// +groupName=rbac.authorization.k8s.io

package v1alpha1 // import "k8s.io/api/rbac/v1alpha1"
21 vendor/k8s.io/api/rbac/v1alpha1/generated.pb.go generated vendored

@@ -14,9 +14,8 @@ See the License for the specific language governing permissions and
limitations under the License.
*/

// Code generated by protoc-gen-gogo.
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: k8s.io/kubernetes/vendor/k8s.io/api/rbac/v1alpha1/generated.proto
// DO NOT EDIT!

/*
Package v1alpha1 is a generated protocol buffer package.

@@ -641,24 +640,6 @@ func (m *Subject) MarshalTo(dAtA []byte) (int, error) {
	return i, nil
}

func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int {
	dAtA[offset] = uint8(v)
	dAtA[offset+1] = uint8(v >> 8)
	dAtA[offset+2] = uint8(v >> 16)
	dAtA[offset+3] = uint8(v >> 24)
	dAtA[offset+4] = uint8(v >> 32)
	dAtA[offset+5] = uint8(v >> 40)
	dAtA[offset+6] = uint8(v >> 48)
	dAtA[offset+7] = uint8(v >> 56)
	return offset + 8
}
func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int {
	dAtA[offset] = uint8(v)
	dAtA[offset+1] = uint8(v >> 8)
	dAtA[offset+2] = uint8(v >> 16)
	dAtA[offset+3] = uint8(v >> 24)
	return offset + 4
}
func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
	for v >= 1<<7 {
		dAtA[offset] = uint8(v&0x7f | 0x80)
2 vendor/k8s.io/api/rbac/v1alpha1/types_swagger_doc_generated.go generated vendored

@@ -28,7 +28,7 @@ package v1alpha1

// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
var map_AggregationRule = map[string]string{
	"":                     "AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole",
	"clusterRoleSelectors": "ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. If any of the selectors match, then the ClusterRole's permissions will be added",
}
1 vendor/k8s.io/api/rbac/v1beta1/doc.go generated vendored

@@ -18,4 +18,5 @@ limitations under the License.
// +k8s:openapi-gen=true

// +groupName=rbac.authorization.k8s.io

package v1beta1 // import "k8s.io/api/rbac/v1beta1"
21 vendor/k8s.io/api/rbac/v1beta1/generated.pb.go generated vendored

@@ -14,9 +14,8 @@ See the License for the specific language governing permissions and
limitations under the License.
*/

// Code generated by protoc-gen-gogo.
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: k8s.io/kubernetes/vendor/k8s.io/api/rbac/v1beta1/generated.proto
// DO NOT EDIT!

/*
Package v1beta1 is a generated protocol buffer package.

@@ -641,24 +640,6 @@ func (m *Subject) MarshalTo(dAtA []byte) (int, error) {
	return i, nil
}

func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int {
	dAtA[offset] = uint8(v)
	dAtA[offset+1] = uint8(v >> 8)
	dAtA[offset+2] = uint8(v >> 16)
	dAtA[offset+3] = uint8(v >> 24)
	dAtA[offset+4] = uint8(v >> 32)
	dAtA[offset+5] = uint8(v >> 40)
	dAtA[offset+6] = uint8(v >> 48)
	dAtA[offset+7] = uint8(v >> 56)
	return offset + 8
}
func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int {
	dAtA[offset] = uint8(v)
	dAtA[offset+1] = uint8(v >> 8)
	dAtA[offset+2] = uint8(v >> 16)
	dAtA[offset+3] = uint8(v >> 24)
	return offset + 4
}
func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
	for v >= 1<<7 {
		dAtA[offset] = uint8(v&0x7f | 0x80)
2 vendor/k8s.io/api/rbac/v1beta1/types_swagger_doc_generated.go generated vendored

@@ -28,7 +28,7 @@ package v1beta1

// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
var map_AggregationRule = map[string]string{
	"":                     "AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole",
	"clusterRoleSelectors": "ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. If any of the selectors match, then the ClusterRole's permissions will be added",
}
1 vendor/k8s.io/api/scheduling/v1alpha1/doc.go generated vendored

@@ -18,4 +18,5 @@ limitations under the License.
// +k8s:openapi-gen=true

// +groupName=scheduling.k8s.io

package v1alpha1 // import "k8s.io/api/scheduling/v1alpha1"
21 vendor/k8s.io/api/scheduling/v1alpha1/generated.pb.go generated vendored

@@ -14,9 +14,8 @@ See the License for the specific language governing permissions and
limitations under the License.
*/

// Code generated by protoc-gen-gogo.
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: k8s.io/kubernetes/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto
// DO NOT EDIT!

/*
Package v1alpha1 is a generated protocol buffer package.

@@ -141,24 +140,6 @@ func (m *PriorityClassList) MarshalTo(dAtA []byte) (int, error) {
	return i, nil
}

func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int {
	dAtA[offset] = uint8(v)
	dAtA[offset+1] = uint8(v >> 8)
	dAtA[offset+2] = uint8(v >> 16)
	dAtA[offset+3] = uint8(v >> 24)
	dAtA[offset+4] = uint8(v >> 32)
	dAtA[offset+5] = uint8(v >> 40)
	dAtA[offset+6] = uint8(v >> 48)
	dAtA[offset+7] = uint8(v >> 56)
	return offset + 8
}
func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int {
	dAtA[offset] = uint8(v)
	dAtA[offset+1] = uint8(v >> 8)
	dAtA[offset+2] = uint8(v >> 16)
	dAtA[offset+3] = uint8(v >> 24)
	return offset + 4
}
func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
	for v >= 1<<7 {
		dAtA[offset] = uint8(v&0x7f | 0x80)
1 vendor/k8s.io/api/scheduling/v1beta1/doc.go generated vendored

@@ -18,4 +18,5 @@ limitations under the License.
// +k8s:openapi-gen=true

// +groupName=scheduling.k8s.io

package v1beta1 // import "k8s.io/api/scheduling/v1beta1"
21 vendor/k8s.io/api/scheduling/v1beta1/generated.pb.go generated vendored

@@ -14,9 +14,8 @@ See the License for the specific language governing permissions and
limitations under the License.
*/

// Code generated by protoc-gen-gogo.
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: k8s.io/kubernetes/vendor/k8s.io/api/scheduling/v1beta1/generated.proto
// DO NOT EDIT!

/*
Package v1beta1 is a generated protocol buffer package.

@@ -141,24 +140,6 @@ func (m *PriorityClassList) MarshalTo(dAtA []byte) (int, error) {
	return i, nil
}

func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int {
	dAtA[offset] = uint8(v)
	dAtA[offset+1] = uint8(v >> 8)
	dAtA[offset+2] = uint8(v >> 16)
	dAtA[offset+3] = uint8(v >> 24)
	dAtA[offset+4] = uint8(v >> 32)
	dAtA[offset+5] = uint8(v >> 40)
	dAtA[offset+6] = uint8(v >> 48)
	dAtA[offset+7] = uint8(v >> 56)
	return offset + 8
}
func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int {
	dAtA[offset] = uint8(v)
	dAtA[offset+1] = uint8(v >> 8)
	dAtA[offset+2] = uint8(v >> 16)
	dAtA[offset+3] = uint8(v >> 24)
	return offset + 4
}
func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
	for v >= 1<<7 {
		dAtA[offset] = uint8(v&0x7f | 0x80)
1 vendor/k8s.io/api/settings/v1alpha1/doc.go generated vendored

@@ -18,4 +18,5 @@ limitations under the License.
// +k8s:openapi-gen=true

// +groupName=settings.k8s.io

package v1alpha1 // import "k8s.io/api/settings/v1alpha1"
21 vendor/k8s.io/api/settings/v1alpha1/generated.pb.go generated vendored

@@ -14,9 +14,8 @@ See the License for the specific language governing permissions and
limitations under the License.
*/

// Code generated by protoc-gen-gogo.
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: k8s.io/kubernetes/vendor/k8s.io/api/settings/v1alpha1/generated.proto
// DO NOT EDIT!

/*
Package v1alpha1 is a generated protocol buffer package.

@@ -216,24 +215,6 @@ func (m *PodPresetSpec) MarshalTo(dAtA []byte) (int, error) {
	return i, nil
}

func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int {
	dAtA[offset] = uint8(v)
	dAtA[offset+1] = uint8(v >> 8)
	dAtA[offset+2] = uint8(v >> 16)
	dAtA[offset+3] = uint8(v >> 24)
	dAtA[offset+4] = uint8(v >> 32)
	dAtA[offset+5] = uint8(v >> 40)
	dAtA[offset+6] = uint8(v >> 48)
	dAtA[offset+7] = uint8(v >> 56)
	return offset + 8
}
func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int {
	dAtA[offset] = uint8(v)
	dAtA[offset+1] = uint8(v >> 8)
	dAtA[offset+2] = uint8(v >> 16)
	dAtA[offset+3] = uint8(v >> 24)
	return offset + 4
}
func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
	for v >= 1<<7 {
		dAtA[offset] = uint8(v&0x7f | 0x80)
Some files were not shown because too many files have changed in this diff.